id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26,000 | makeDataset.py | numenta_nupic-legacy/src/nupic/datafiles/extra/secondOrder/makeDataset.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file_record_stream import FileRecordStream
def _generateModel0(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model0'. For this model, we generate the following
set of sequences:
1-2-3 (4X)
1-2-4 (1X)
5-2-3 (1X)
5-2-4 (4X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# ===============================================================
# Let's model the following:
# a-b-c (4X)
# a-b-d (1X)
# e-b-c (1X)
# e-b-d (4X)
# --------------------------------------------------------------------
# Initial probabilities, 'a' and 'e' equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[4] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 'a' and 'e' should lead to 'b'
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 4:
probs.fill(0)
probs[1] = 1.0 # lead only to b
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# a-b should lead to c 80% and d 20%
# e-b should lead to c 20% and d 80%
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,1]):
probs.fill(0)
probs[2] = 0.80 # 'ab' leads to 'c' 80% of the time
probs[3] = 0.20 # 'ab' leads to 'd' 20% of the time
elif key == str([4,1]):
probs.fill(0)
probs[2] = 0.20 # 'eb' leads to 'c' 20% of the time
probs[3] = 0.80 # 'eb' leads to 'd' 80% of the time
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
def _generateModel1(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# --------------------------------------------------------------------
# Initial probabilities, 0 and 1 equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[1] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 0 and 1 should lead to 10,11,12,13,14 with equal probability
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 1:
indices = numpy.array([10,11,12,13,14])
probs.fill(0)
probs[indices] = 1.0 # lead only to b
probs /= probs.sum()
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# 0-10 should lead to 15
# 0-11 to 16
# ...
# 1-10 should lead to 20
# 1-11 shold lean to 21
# ...
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,10]):
probs.fill(0)
probs[15] = 1
elif key == str([0,11]):
probs.fill(0)
probs[16] = 1
elif key == str([0,12]):
probs.fill(0)
probs[17] = 1
elif key == str([0,13]):
probs.fill(0)
probs[18] = 1
elif key == str([0,14]):
probs.fill(0)
probs[19] = 1
elif key == str([1,10]):
probs.fill(0)
probs[20] = 1
elif key == str([1,11]):
probs.fill(0)
probs[21] = 1
elif key == str([1,12]):
probs.fill(0)
probs[22] = 1
elif key == str([1,13]):
probs.fill(0)
probs[23] = 1
elif key == str([1,14]):
probs.fill(0)
probs[24] = 1
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
def _generateModel2(numCategories, alpha=0.25):
""" Generate the initial, first order, and second order transition
probabilities for 'model2'. For this model, we generate peaked random
transitions using dirichlet distributions.
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
alpha: Determines the peakedness of the transitions. Low alpha
values (alpha=0.01) place the entire weight on a single
transition. Large alpha values (alpha=10) distribute the
evenly among all transitions. Intermediate values (alpha=0.5)
give a moderately peaked transitions.
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table. None means infinite
length.
Here is an example of some return values for an intermediate alpha value:
initProb: [0.33, 0.33, 0.33]
firstOrder: {'[0]': [0.2, 0.7, 0.1],
'[1]': [0.1, 0.1, 0.8],
'[2]': [0.1, 0.0, 0.9]}
secondOrder: {'[0,0]': [0.1, 0.0, 0.9],
'[0,1]': [0.0, 0.2, 0.8],
'[0,2]': [0.1, 0.8, 0.1],
...
'[2,2]': [0.8, 0.2, 0.0]}
"""
# --------------------------------------------------------------------
# All initial probabilities, are equally likely
initProb = numpy.ones(numCategories)/numCategories
def generatePeakedProbabilities(lastIdx,
numCategories=numCategories,
alpha=alpha):
probs = numpy.random.dirichlet(alpha=[alpha]*numCategories)
probs[lastIdx] = 0.0
probs /= probs.sum()
return probs
# --------------------------------------------------------------------
# 1st order transitions
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = generatePeakedProbabilities(catIdx)
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = generatePeakedProbabilities(secondIdx)
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, None)
def _generateFile(filename, numRecords, categoryList, initProb,
                  firstOrderProb, secondOrderProb, seqLen, numNoise=0, resetsEvery=None):
  """ Generate a set of records reflecting a set of probabilities.

  Parameters:
  ----------------------------------------------------------------
  filename:         name of .csv file to generate
  numRecords:       number of records to generate
  categoryList:     list of category names
  initProb:         Initial probability for each category. This is a vector
                      of length len(categoryList).
  firstOrderProb:   A dictionary of the 1st order probabilities. The key
                      is the 1st element of the sequence, the value is
                      the probability of each 2nd element given the first.
  secondOrderProb:  A dictionary of the 2nd order probabilities. The key
                      is the first 2 elements of the sequence, the value is
                      the probability of each possible 3rd element given the
                      first two.
  seqLen:           Desired length of each sequence. The 1st element will
                      be generated using the initProb, the 2nd element by the
                      firstOrder table, and the 3rd and all successive
                      elements by the secondOrder table. None means infinite
                      length.
  numNoise:         Number of noise elements to place between each
                      sequence. The noise elements are evenly distributed from
                      all categories.
  resetsEvery:      If not None, generate a reset every N records

  Here is an example of some parameters:

  categoryList:     ['cat1', 'cat2', 'cat3']
  initProb:         [0.7, 0.2, 0.1]
  firstOrderProb:   {'[0]': [0.3, 0.3, 0.4],
                     '[1]': [0.3, 0.3, 0.4],
                     '[2]': [0.3, 0.3, 0.4]}
  secondOrderProb:  {'[0,0]': [0.3, 0.3, 0.4],
                     '[0,1]': [0.3, 0.3, 0.4],
                     '[0,2]': [0.3, 0.3, 0.4],
                     '[1,0]': [0.3, 0.3, 0.4],
                     '[1,1]': [0.3, 0.3, 0.4],
                     '[1,2]': [0.3, 0.3, 0.4],
                     '[2,0]': [0.3, 0.3, 0.4],
                     '[2,1]': [0.3, 0.3, 0.4],
                     '[2,2]': [0.3, 0.3, 0.4]}
  """
  # Create the output record stream (note: Python 2 print statement --
  # this module is Python-2-only).
  print "Creating %s..." % (filename)
  fields = [('reset', 'int', 'R'), ('name', 'string', '')]
  outFile = FileRecordStream(filename, write=True, fields=fields)

  # --------------------------------------------------------------------
  # Convert the probability tables into cumulative probabilities so a
  # single uniform draw can be mapped to a category via searchsorted.
  initCumProb = initProb.cumsum()

  firstOrderCumProb = dict()
  for (key,value) in firstOrderProb.iteritems():
    firstOrderCumProb[key] = value.cumsum()

  secondOrderCumProb = dict()
  for (key,value) in secondOrderProb.iteritems():
    secondOrderCumProb[key] = value.cumsum()

  # --------------------------------------------------------------------
  # Write out the sequences
  elementsInSeq = []          # elements emitted so far in the current sequence
  numElementsSinceReset = 0   # records emitted since the last reset marker
  maxCatIdx = len(categoryList) - 1
  for i in xrange(numRecords):
    # Emit a reset flag at the start of each reset interval.
    if numElementsSinceReset == 0:
      reset = 1
    else:
      reset = 0

    # Pick the next element, based on how far we are into the 2nd order
    # sequence: position 0 uses initProb, position 1 the 1st order table,
    # later positions the 2nd order table, and the tail is random noise.
    rand = numpy.random.rand()
    if len(elementsInSeq) == 0:
      catIdx = numpy.searchsorted(initCumProb, rand)
    elif len(elementsInSeq) == 1:
      catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)
    elif (len(elementsInSeq) >=2) and \
                  (seqLen is None or len(elementsInSeq) < seqLen-numNoise):
      catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-2:])], rand)
    else:   # random "noise"
      catIdx = numpy.random.randint(len(categoryList))

    # Write out the record.
    # Clamp: searchsorted can return len(categoryList) when rand lands at or
    # past the final cumulative value (floating point round-off).
    catIdx = min(maxCatIdx, catIdx)
    outFile.appendRecord([reset,categoryList[catIdx]])
    #print categoryList[catIdx]

    # ------------------------------------------------------------
    # Increment counters
    elementsInSeq.append(catIdx)
    numElementsSinceReset += 1

    # Generate another reset?
    if resetsEvery is not None and numElementsSinceReset == resetsEvery:
      numElementsSinceReset = 0
      elementsInSeq = []

    # Start another 2nd order sequence?
    if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):
      elementsInSeq = []

  outFile.close()
def generate(model, filenameTrain, filenameTest, filenameCategory,
             numCategories=178, numTrainingRecords=1000,
             numTestingRecords=100, numNoise=5, resetsEvery=None):
  """Create the category file plus training and testing datasets for one of
  the artificial second-order models.

  Parameters:
  ----------------------------------------------------------------
  model:              one of 'model0', 'model1', 'model2' (see the
                      corresponding _generateModelN functions above)
  filenameTrain:      path of the training .csv to create
  filenameTest:       path of the testing .csv to create
  filenameCategory:   path of the category-name file to create, one
                      name ('cat1'..'catN') per line
  numCategories:      total number of categories
  numTrainingRecords: number of records in the training file
  numTestingRecords:  number of records in the testing file
  numNoise:           number of random noise elements between sequences
  resetsEvery:        if not None, emit a reset marker every N records

  Raises RuntimeError for an unrecognized model name.
  """
  # Fixed seed so the generated datasets are reproducible run to run.
  numpy.random.seed(41)

  # =====================================================================
  # Create our categories and category file.
  print "Creating %s..." % (filenameCategory)
  categoryList = ['cat%d' % i for i in range(1, numCategories+1)]
  categoryFile = open(filenameCategory, 'w')
  for category in categoryList:
    categoryFile.write(category+'\n')
  categoryFile.close()

  # ====================================================================
  # Generate the model's probability tables.
  if model == 'model0':
    (initProb, firstOrderProb, secondOrderProb, seqLen) = \
                          _generateModel0(numCategories)
  elif model == 'model1':
    (initProb, firstOrderProb, secondOrderProb, seqLen) = \
                          _generateModel1(numCategories)
  elif model == 'model2':
    (initProb, firstOrderProb, secondOrderProb, seqLen) = \
                          _generateModel2(numCategories)
  else:
    raise RuntimeError("Unsupported model")

  # ====================================================================
  # Generate the training and testing files from the same tables.
  _generateFile(filename=filenameTrain, numRecords=numTrainingRecords,
                categoryList=categoryList, initProb=initProb,
                firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
                seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)

  _generateFile(filename=filenameTest, numRecords=numTestingRecords,
                categoryList=categoryList, initProb=initProb,
                firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
                seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)
| 18,935 | Python | .py | 408 | 36.301471 | 84 | 0.537595 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,001 | makeDataset.py | numenta_nupic-legacy/src/nupic/datafiles/extra/hotgym/raw/makeDataset.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unify the various HotGym CSV files to a single coherent StandardFile
See README.txt for details
"""
import os
import sys
import glob
import operator
import datetime
"""from nupic.providers.WeatherProvider import (
WeatherStation,
getClosestStation)
"""
from nupic.data.file import File
def _parseTimestamp(t):
tokens = t.split()
day, month, year = [int(x) for x in tokens[0].split('/')]
if len(tokens) == 1:
hour = 0
minute = 0
else:
assert len(tokens) == 3
hour, minute, seconds = [int(x) for x in tokens[1].split(':')]
hour %= 12
if tokens[2] == 'PM':
hour += 12
result = datetime.datetime(year, month, day, hour, minute)
assert datetime.datetime(2010, 7, 2) <= result < datetime.datetime(2011, 1, 1)
return result
def _parseLine(line):
  """Parse one raw CSV line into a record [gym, address, timestamp,
  consumption], looking up the street address from the gym name."""
  # Strip the double quotes around each field.
  cleaned = line.replace('"', '')
  # Drop the trailing newline, split, and discard the leading running count.
  fields = cleaned[:-1].split(',')[1:]
  gym = fields[0]

  # Street address for each known gym.
  gymAddresses = {
    'Balgowlah Platinum': 'Shop 67 197-215 Condamine Street Balgowlah 2093',
    'Lane Cove': '24-28 Lane Cove Plaza Lane Cove 2066',
    'Mosman': '555 Military Rd Mosman 2088',
    'North Sydney - Walker St': '100 Walker St North Sydney 2060',
    'Randwick': 'Royal Randwick Shopping Centre 73 Belmore Rd Randwick 2031'
  }

  # Gym name, address, parsed timestamp, float consumption -- in that order.
  return [gym,
          gymAddresses[gym],
          _parseTimestamp(fields[1]),
          float(fields[2])]
def makeDataset():
  """Build ./hotgym2.csv (a StandardFile) from the raw customer CSV
  'numenta_air_Con.csv' in the current directory.

  Returns (total, missing).  NOTE(review): neither counter is ever updated
  in the body below, so the return value is always (0, 0) -- looks like
  leftover scaffolding from the weather-merge version; confirm before
  relying on it.
  """
  inputFile = 'numenta_air_Con.csv'
  fields = [
    ('gym', 'string', 'S'),
    ('address', 'string', ''),
    ('timestamp', 'datetime', 'T'),
    ('consumption', 'float', '')]

  gymName = None
  missing = 0
  total = 0
  # Create a the output file by parsing the customer given csv
  with File('./hotgym2.csv', fields) as o:
    with open(inputFile) as f:
      # Skip header
      f.readline()

      # iterate over all the lines in the input file (Python 2 xreadlines)
      for line in f.xreadlines():
        # Parse the fields in the current line
        record = _parseLine(line)
        # Write the merged record to the output file
        o.write(record)
        # Progress trace: print each gym name the first time it is seen
        # (presumably the input is grouped by gym -- verify against data).
        if record[0] != gymName:
          gymName = record[0]
          print gymName

  return total, missing
if __name__ == '__main__':
  # Script entry point (Python 2): build the unified hotgym dataset.
  makeDataset()
  print 'Done.'
| 3,532 | Python | .py | 100 | 30.99 | 80 | 0.662462 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,002 | cross.py | numenta_nupic-legacy/src/nupic/math/cross.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
__all__ = ["cross_list", "cross", "combinations"]
def cross_list(*sequences):
  """Return the Cartesian product of the given sequences as a list of lists
  (last sequence varies fastest; no sequences yields [[]]).

  From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html
  """
  product = [[]]
  for seq in sequences:
    # Extend every partial combination with every item of this sequence.
    product = [combo + [item] for combo in product for item in seq]
  return product
def cross(*sequences):
  """Yield the Cartesian product of the given sequences as tuples, in
  odometer order (last sequence varies fastest).

  Fixed relative to the original: uses the ``next()`` builtin and a list
  comprehension instead of the Python-2-only ``iterator.next()`` and
  list-returning ``map()``, and handles an empty input sequence explicitly
  (letting the StopIteration escape would become a RuntimeError under
  PEP 479 on Python 3.7+).

  From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html
  """
  # visualize an odometer, with "wheels" displaying "digits"...:
  wheels = [iter(seq) for seq in sequences]
  try:
    digits = [next(it) for it in wheels]
  except StopIteration:
    # Some sequence is empty: the product is empty.
    return
  while True:
    yield tuple(digits)
    for i in range(len(digits)-1, -1, -1):
      try:
        digits[i] = next(wheels[i])
        break
      except StopIteration:
        # This wheel rolled over: reset it and carry to the wheel on its left.
        wheels[i] = iter(sequences[i])
        digits[i] = next(wheels[i])
    else:
      # Every wheel rolled over: the full cycle is complete.
      break
def dcross(**keywords):
  """
  Similar to cross(), but generates output dictionaries instead of tuples:
  each yielded dict maps every keyword name to one value drawn from its
  sequence, covering all combinations.

  Fixed relative to the original: uses the ``next()`` builtin and a list
  comprehension instead of the Python-2-only ``iterator.next()`` and
  list-returning ``map()``, and handles an empty input sequence explicitly
  (PEP 479 safety).
  """
  keys = list(keywords.keys())
  # Pair keys with sequences explicitly rather than trusting that
  # keywords.values() iterates in the same order as keywords.keys().
  sequences = [keywords[key] for key in keys]
  wheels = [iter(seq) for seq in sequences]
  try:
    digits = [next(it) for it in wheels]
  except StopIteration:
    # An empty sequence means no combinations at all.
    return
  while True:
    yield dict(zip(keys, digits))
    for i in range(len(digits)-1, -1, -1):
      try:
        digits[i] = next(wheels[i])
        break
      except StopIteration:
        # Wheel rolled over: reset and carry leftwards.
        wheels[i] = iter(sequences[i])
        digits[i] = next(wheels[i])
    else:
      break
def combinations(n, c):
  """Yield every c-element combination of indices from range(n) as a tuple,
  in lexicographic order (like itertools.combinations(range(n), c)).

  Fixed relative to the original: ``positions`` is materialized with
  ``list(range(c))`` -- the original assigned ``range(c)`` and then mutated
  it in place, which breaks on Python 3 where range objects are immutable
  (and ``xrange`` no longer exists).
  """
  m = n - 1                    # largest legal index
  positions = list(range(c))   # current combination, always strictly increasing
  while True:
    yield tuple(positions)
    success = False
    lastPi = m
    # Find the rightmost position that can still be advanced...
    for i in range(c-1, -1, -1):
      pi = positions[i]
      if pi < lastPi:
        # ...advance it and reset everything to its right to consecutive values.
        pi += 1
        for j in range(i, c):
          positions[j] = pi
          pi += 1
        success = True
        break
      lastPi = pi-1
    if not success: break # Done.
def permutations(x):
  """Yield all permutations of sequence x (which must support len, slicing,
  and concatenation -- e.g. str, list, tuple), by inserting the first
  element into every position of each permutation of the remainder.

  Fixed relative to the original: uses ``range`` instead of the
  Python-2-only ``xrange`` (identical semantics on Python 2, and required
  on Python 3).
  """
  if len(x) > 1:
    for permutation in permutations(x[1:]):
      # Stick the first element in every position of the sub-permutation.
      for i in range(len(permutation)+1):
        yield permutation[:i] + x[0:1] + permutation[i:]
  else: yield x
| 3,241 | Python | .py | 94 | 30.202128 | 121 | 0.641606 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,003 | proposal.py | numenta_nupic-legacy/src/nupic/math/proposal.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# NOTE: this module is intentionally disabled -- any import raises
# immediately, before the rest of the file is reached.  The wildcard
# import below (which supplies numpy and the *Distribution classes used
# throughout this file) only matters if the raise is removed.
raise Exception("XERROR proposal not available")
from nupic.math.dist import *
class Proposal(object):
  """Abstract base for MCMC proposal distributions.

  Concrete subclasses must provide:
    sample, logForward, logBackward = propose(currentValue, randomNumberGenerator)
  and may override adapt(accepted) to tune themselves from acceptance
  feedback.
  """

  def adapt(self, accepted):
    """Feedback hook: called with whether the last proposal was accepted.
    Default is a no-op."""
    pass
class RangeWrapper(Proposal):
  """Wraps another proposal and rejection-samples until the proposed value
  (shifted by an optional offset) falls inside [min, max].

  Bug fixes relative to the original:
    * the ``maxIterations`` constructor argument was ignored (the attribute
      was hard-coded to 1000);
    * the iteration counter in propose() was never incremented, so the
      retry limit could never trigger and an unsatisfiable range would
      loop forever.
  """

  def __init__(self, prop, min=None, max=None, offset=0, maxIterations=1000):
    self.prop = prop
    self.offset = offset            # added to the wrapped proposal's value
    self.min = min                  # inclusive lower bound, or None
    self.max = max                  # inclusive upper bound, or None
    self.maxIterations = maxIterations  # was mistakenly hard-coded to 1000

  def propose(self, inCurrent, r):
    """Return (value, logForward, logBackward) with value inside the range.
    Raises RuntimeError if no in-range value is found in maxIterations tries."""
    iterations = 0
    prop = None
    value = None
    done = False
    while not done:
      # Work in the wrapped proposal's coordinate system.
      if self.offset is not None: current = inCurrent - self.offset
      else: current = inCurrent
      prop = self.prop.propose(current, r)
      value = prop[0]
      if self.offset is not None: value = value + self.offset
      done = True
      if (self.min is not None) and (value < self.min): done = False
      if (self.max is not None) and (value > self.max): done = False
      iterations += 1  # was missing, so the guard below never fired
      if not done and iterations >= self.maxIterations:
        raise RuntimeError("Failed to sample in %d iterations." % self.maxIterations)
    # Replace the proposal's value with the offset-adjusted one; keep the
    # forward/backward log-probabilities from the wrapped proposal.
    prop = tuple([value] + list(prop[1:]))
    return prop
def toOdds(p):
  """Convert a probability p in (0, 1) to odds p / (1 - p)."""
  return p / (1.0 - p)


def toProbability(o):
  """Convert odds o back to a probability o / (1 + o)."""
  return o / (1.0 + o)


def logit(p):
  """Log-odds (logit) of probability p."""
  return numpy.log(p / (1.0 - p))


def invlogit(lo):
  """Inverse logit: map log-odds lo back to a probability."""
  odds = numpy.exp(lo)
  return odds / (1.0 + odds)
def estimateProportion(x, n, prior=0.5):
  """Estimate the proportion of x successes in n trials, smoothed with a
  symmetric pseudo-count prior (prior successes and prior failures)."""
  return (x + prior) / (n + 2.0 * prior)
class CircularQueue(object):
  """Fixed-capacity ring buffer retaining the most recent `n` items added."""

  def __init__(self, n):
    self.queue = [None] * n   # backing storage
    self.max = n              # capacity
    self.n = 0                # total number of items ever added

  def add(self, x):
    # Overwrite the oldest slot once the buffer is full.
    self.queue[self.n % self.max] = x
    self.n += 1

  def clear(self):
    self.n = 0

  def getEarliest(self):
    """Return the oldest item still held in the buffer.

    Bug fix: once the buffer has wrapped (n > max), the oldest surviving
    item sits in slot ``n % max`` -- the next slot to be overwritten.  The
    original indexed ``(n + 1) % max``, which is off by one and returned
    the second-oldest item.
    """
    if self.n > self.max:
      return self.queue[self.n % self.max]
    else:
      assert self.n
      return self.queue[0]
class MovingAverage(CircularQueue):
  """Running mean over the last `n` values, optionally seeded with a prior
  pseudo-observation."""

  def __init__(self, n, prior=None):
    CircularQueue.__init__(self, n)
    if prior is not None:
      self.pn = prior   # prior numerator (pseudo-observation value)
      self.pd = 1.0     # prior denominator (one pseudo-count)
    else:
      self.pn = 0.0
      self.pd = 0.0
    self.sum = 0.0      # running sum of the values currently in the window

  def add(self, x):
    """Append x, evicting the oldest value from the running sum when full.

    Bug fix: the slot CircularQueue.add is about to overwrite is
    ``self.n % self.max``, so that is the value leaving the window.  The
    original subtracted ``self.queue[(self.n + 1) % self.max]`` -- the
    wrong element -- which corrupted the running sum once the window
    wrapped.
    """
    rem = 0
    if self.n >= self.max: rem = self.queue[self.n % self.max]
    CircularQueue.add(self, x)
    self.sum += (x - rem)

  def clear(self):
    self.sum = 0
    CircularQueue.clear(self)

  def get(self):
    """Return the prior-smoothed mean of the values in the window."""
    return (self.sum + self.pn) / (min(self.n, self.max) + self.pd)

  def getSum(self):
    """Return (numerator, denominator) of the smoothed mean, for callers
    that want to adjust them before dividing."""
    return (self.sum + self.pn), (min(self.n, self.max) + self.pd)
class TransitionKernel(Proposal):
  """Mixture proposal: with probability `kernel` propose staying at the
  current value, otherwise delegate to the wrapped proposal (rejection
  sampling guarantees the delegated draw actually moves).  When
  adaptiveWindow is given, `kernel` is adapted on-line so the observed
  acceptance rate (tracked by a MovingAverage) approaches `target`.
  """
  def __init__(self, proposal, kernel, adaptiveWindow=None, target=0.5):
    self.prop = proposal
    # kernel may be a scalar, a (min, max) range, or a (min, start, max) tuple.
    if hasattr(kernel, "__iter__"):
      assert len(kernel) in [2, 3]
      if len(kernel) == 2:
        self.minKernel, self.maxKernel = kernel
        self.kernel = (kernel[0] + kernel[1]) / 2.0
      elif len(kernel) == 3:
        self.minKernel, self.kernel, self.maxKernel = kernel
      else:
        raise RuntimeError("Specify kernel as 1 number, a range, or a 3-tuple "
            "with (min, start, max)")
    else:
      self.kernel = kernel
      self.minKernel = min(0.01, kernel) # Assume a default bounds.
      self.maxKernel = max(0.99, kernel) # Assume a default bounds.
    self.target = target
    self.accepted = None
    if adaptiveWindow:
      # Moving window of accept/reject outcomes, seeded with a 50% prior.
      self.accepted = MovingAverage(adaptiveWindow, prior=0.5)
  def propose(self, current, r):
    """Return (proposedValue, logForward, logBackward).  May first adapt the
    stay probability, once the acceptance window has filled up."""
    # Adapt if have sufficient data.
    willAdaptForward = False
    willAdaptBackward = False
    if self.accepted:
      minAdapt = self.accepted.max
      willAdaptForward = ((self.accepted.n+0) >= minAdapt)
      willAdaptBackward = ((self.accepted.n+1) >= minAdapt)
    kernelForward = self.kernel
    kernelBackward = kernelForward
    targetOdds = logit(self.target)
    if willAdaptForward:
      # Nudge the stay probability toward the target acceptance rate in
      # log-odds space, clipped to [minKernel, maxKernel].
      obsOdds = logit(self.accepted.get())
      weight = 0.1
      logOR = weight * (targetOdds - obsOdds)
      kernelForward = invlogit(logit(self.kernel) + logOR)
      kernelForward = max(self.minKernel, min(self.maxKernel, kernelForward))
    if willAdaptBackward:
      # Now consider the backward direction.
      # Estimate what the acceptance average would be after one more
      # accepted sample (displacing the earliest window entry when the
      # window is full), and derive the kernel the reverse move would use.
      a, b = self.accepted.getSum()
      if (self.accepted.n >= self.accepted.max):
        nextOdds = logit(estimateProportion(a + 1 - self.accepted.getEarliest(), b))
      else:
        nextOdds = logit(estimateProportion(a + 1, b + 1))
      nextWeight = 0.1
      nextLogOR = nextWeight * (targetOdds - nextOdds)
      kernelBackward = invlogit(logit(kernelForward) + nextLogOR)
      kernelBackward = max(self.minKernel, min(self.maxKernel, kernelBackward))
    if willAdaptForward or willAdaptBackward:
      # Trace output (Python 2 print statements; this module is py2-only).
      if not "obsOdds" in locals():
        obsOdds = logit(self.accepted.get())
      print " Adapting:", obsOdds, "->", targetOdds
      print " Adapted:", self.kernel, kernelForward, kernelBackward
    self.kernel = kernelForward
    # Perform the proposal.
    stay = (r.uniform(0, 1) < kernelForward)
    if stay:
      # print "stay"
      return (current, numpy.log(kernelForward), numpy.log(kernelBackward))
    else:
      # print "propose"
      while True: # Rejection sample to avoid the case of staying in one place.
        # Otherwise, our 'stay' probability will be wrong.
        proposed, logForward, logBackward = self.prop.propose(current, r)
        if proposed != current: break
      return (proposed, (logForward + numpy.log(1.0 - kernelForward)),
              (logBackward + numpy.log(1.0 - kernelBackward)))
  def adapt(self, accepted):
    """Record whether the last proposal was accepted; feeds the adaptation."""
    if self.accepted: self.accepted.add(int(accepted))
class DiscreteProposal(Proposal):
  """Proposal over a finite key set: stay at the current key with
  probability `kernel`, otherwise jump uniformly to one of the other keys.
  The random generator r must supply numpy-style uniform(low, high) and
  randint(low, high) with an exclusive upper bound.
  """

  def __init__(self, keys, kernel):
    self.keys = [i for i in keys]
    self.keyMap = dict((key, i) for (i, key) in enumerate(self.keys))
    nKeys = len(keys)
    assert nKeys > 0
    self.nKeys = nKeys
    if nKeys == 1:
      kernel = 1.0   # a single key: staying is the only possible move
      self.logp = 0
    else:
      # Log-probability of each of the (nKeys - 1) non-stay targets.
      self.logp = -numpy.log(nKeys-1)
    assert (kernel >= 0) and (kernel <= 1.0)
    self.kernel = kernel

  def propose(self, current, r):
    """Sample a key; return (key, log forward probability, log backward
    probability).  The two log-probabilities are always equal because the
    proposal is symmetric."""
    if r.uniform(0, 1) < self.kernel:
      # Stay put.
      logStay = numpy.log(self.kernel)
      return current, logStay, logStay
    # Move: choose uniformly among the other keys (never `current`).
    currentIdx = self.keyMap[current]
    draw = r.randint(0, self.nKeys-1)   # draw in [0, nKeys-2]
    logMove = numpy.log(1.0 - self.kernel) + self.logp
    if draw < currentIdx:
      return self.keys[draw], logMove, logMove
    # Skip over the current key's index.
    return self.keys[draw+1], logMove, logMove
class TwoFishProposal(Proposal):
  """Symmetric-direction integer random walk: flip a fair coin for the
  direction, then draw the step size from a Poisson truncated at the
  distance to the relevant bound of [minVal, maxVal]."""
  def __init__(self, scale, minVal=-numpy.inf, maxVal=numpy.inf):
    self.dist = PoissonDistribution(scale)  # step-size distribution
    self.minVal = minVal
    self.maxVal = maxVal
  @staticmethod
  def proposePositive(dist, minVal, maxVal, r):
    """Rejection-sample a non-negative step `diff` <= maxVal and return
    (diff, logForward, logBackward); the log-probabilities account for
    the truncation and for the 50% chance of having picked this direction.
    Here minVal/maxVal are distances relative to the current value, so
    0 must lie inside [minVal, maxVal]."""
    if minVal > 0: raise RuntimeError("Current value is outside legal range.")
    elif maxVal < 0: raise RuntimeError("Current value is outside legal range.")
    cdf = dist.cdf(maxVal)
    # Rejection-sample until the step fits below the bound.
    while 1:
      diff, logProb = dist.sample(r)
      if diff <= maxVal: break
    # The reverse move is truncated at the distance from the proposed
    # point back to the opposite bound.
    rrange = diff - minVal
    rcdf = dist.cdf(rrange)
    rlp = dist.logProbability(diff)
    log2 = numpy.log(2) # The half chance we went up, rather than down.
    logForward = logProb - numpy.log(cdf) - log2
    logBackward = rlp - numpy.log(rcdf) - log2
    return diff, logForward, logBackward
  def propose(self, current, r):
    """Propose current + diff or current - diff with equal probability."""
    up = r.randint(2)
    if up:
      diff, logForward, logBackward = TwoFishProposal.proposePositive(
          self.dist, self.minVal - current, self.maxVal - current, r)
      return current + diff, logForward, logBackward
    else:
      # Mirror the bounds for the downward direction.
      diff, logForward, logBackward = TwoFishProposal.proposePositive(
          self.dist, current - self.maxVal, current - self.minVal, r)
      return current - diff, logForward, logBackward
class PoissonProposal(Proposal):
  """Propose a new non-negative integer from a Poisson whose rate tracks
  the current value; a small offset keeps the rate strictly positive so a
  current value of 0 can still move."""

  def __init__(self, offset=0.1):
    self.offset = offset  # added to the current value to form the rate

  def propose(self, current, r):
    """Sample x ~ Poisson(current + offset).  Returns (x, log forward
    probability of drawing x, log backward probability of re-proposing
    `current` from a Poisson centered at x)."""
    rate = current + self.offset
    x, logForward = PoissonDistribution(rate).sample(r)
    logBackward = PoissonDistribution(x + self.offset).logDensity(current)
    return x, logForward, logBackward
class NormalProposal(Proposal):
  """Symmetric random-walk proposal: add zero-mean Gaussian noise with
  standard deviation `sd` to the current value."""

  def __init__(self, sd):
    # A single zero-centered normal is reused for every step; symmetry
    # makes the forward and backward log-probabilities identical.
    self.dist = NormalDistribution(0, sd)

  def propose(self, current, r):
    delta, logProb = self.dist.sample(r)
    return current + delta, logProb, logProb
class LogNormalProposal(Proposal):
  """Multiplicative random-walk proposal for positive values: sample from a
  log-normal whose underlying normal is centered on log(current)."""

  def __init__(self, normalSD):
    self.sd = normalSD  # standard deviation of the underlying normal

  def propose(self, current, r):
    forward = LogNormalDistribution(numpy.log(current), self.sd)
    proposed, logForward = forward.sample(r)
    # Re-center the same distribution on the proposal to score the
    # reverse move.
    forward.setNormalMean(numpy.log(proposed))
    return proposed, logForward, forward.logDensity(current)
class GammaProposal(Proposal):
  """Propose from a Gamma distribution whose mean tracks the current
  (non-negative) value; `shape` controls the spread and `offset` keeps
  the scale positive when the current value is 0."""

  def __init__(self, shape, offset=0.001):
    self.shape = float(shape)
    self.offset = offset

  def propose(self, current, r):
    # Forward: Gamma with mean (current + offset).
    forwardScale = max((current + self.offset) / self.shape, 0)
    proposed, logForward = GammaDistribution(self.shape, forwardScale).sample(r)
    # Backward: score `current` under a Gamma centered on the proposal.
    backwardScale = (proposed + self.offset) / self.shape
    logBackward = GammaDistribution(self.shape, backwardScale).logDensity(current)
    return proposed, logForward, logBackward
| 10,882 | Python | .py | 272 | 34.985294 | 85 | 0.672277 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,004 | roc_utils.py | numenta_nupic-legacy/src/nupic/math/roc_utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utility functions to compute ROC (Receiver Operator Characteristic) curves
and AUC (Area Under the Curve).
The ROCCurve() and AreaUnderCurve() functions are based on the roc_curve()
and auc() functions found in metrics.py module of scikit-learn
(http://scikit-learn.org/stable/). Scikit-learn has a BSD license (3 clause).
Following is the original license/credits statement from the top of the
metrics.py file:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD Style.
"""
import numpy as np
def ROCCurve(y_true, y_score):
  """compute Receiver operating characteristic (ROC)

  Note: this implementation is restricted to the binary classification task.

  Parameters
  ----------

  y_true : array, shape = [n_samples]
      true binary labels

  y_score : array, shape = [n_samples]
      target scores, can either be probability estimates of
      the positive class, confidence values, or binary decisions.

  Returns
  -------
  fpr : array, shape = [>2]
      False Positive Rates

  tpr : array, shape = [>2]
      True Positive Rates

  thresholds : array, shape = [>2]
      Thresholds on y_score used to compute fpr and tpr

  References
  ----------
  http://en.wikipedia.org/wiki/Receiver_operating_characteristic
  """
  y_true = np.ravel(y_true)
  classes = np.unique(y_true)

  # ROC only for binary classification
  if classes.shape[0] != 2:
    raise ValueError("ROC is defined for binary classification only")

  y_score = np.ravel(y_score)

  n_pos = float(np.sum(y_true == classes[1]))  # nb of true positive
  n_neg = float(np.sum(y_true == classes[0]))  # nb of true negative

  thresholds = np.unique(y_score)
  neg_value, pos_value = classes[0], classes[1]

  # FIX: np.float was a deprecated alias for the builtin float and was
  # removed in NumPy 1.24; use the builtin directly (same float64 dtype).
  tpr = np.empty(thresholds.size, dtype=float)  # True positive rate
  fpr = np.empty(thresholds.size, dtype=float)  # False positive rate

  # Build tpr/fpr vector: walk the scores from highest to lowest, emitting
  # one (fpr, tpr) point each time a new distinct score value is reached.
  current_pos_count = current_neg_count = sum_pos = sum_neg = idx = 0

  signal = np.c_[y_score, y_true]
  sorted_signal = signal[signal[:, 0].argsort(), :][::-1]
  last_score = sorted_signal[0][0]
  for score, value in sorted_signal:
    if score == last_score:
      # Same threshold as the previous sample: just accumulate counts.
      if value == pos_value:
        current_pos_count += 1
      else:
        current_neg_count += 1
    else:
      # New threshold: flush the point for the previous one.
      tpr[idx] = (sum_pos + current_pos_count) / n_pos
      fpr[idx] = (sum_neg + current_neg_count) / n_neg
      sum_pos += current_pos_count
      sum_neg += current_neg_count
      current_pos_count = 1 if value == pos_value else 0
      current_neg_count = 1 if value == neg_value else 0
      idx += 1
      last_score = score
  else:
    # Loop completed: flush the counts for the final threshold.
    tpr[-1] = (sum_pos + current_pos_count) / n_pos
    fpr[-1] = (sum_neg + current_neg_count) / n_neg

  # hard decisions, add (0,0)
  if fpr.shape[0] == 2:
    fpr = np.array([0.0, fpr[0], fpr[1]])
    tpr = np.array([0.0, tpr[0], tpr[1]])
  # trivial decisions, add (0,0) and (1,1)
  elif fpr.shape[0] == 1:
    fpr = np.array([0.0, fpr[0], 1.0])
    tpr = np.array([0.0, tpr[0], 1.0])
  return fpr, tpr, thresholds
def AreaUnderCurve(x, y):
  """Compute Area Under the Curve (AUC) using the trapezoidal rule

  Parameters
  ----------
  x : array, shape = [n]
      x coordinates

  y : array, shape = [n]
      y coordinates

  Returns
  -------
  auc : float
  """
  if x.shape[0] != y.shape[0]:
    raise ValueError('x and y should have the same shape'
                     ' to compute area under curve,'
                     ' but x.shape = %s and y.shape = %s.'
                     % (x.shape, y.shape))
  if x.shape[0] < 2:
    raise ValueError('At least 2 points are needed to compute'
                     ' area under curve, but x.shape = %s' % x.shape)

  # Sort the samples along the x axis before integrating.
  ordering = np.argsort(x)
  xs = x[ordering]
  ys = y[ordering]

  # Trapezoidal rule: sum over intervals of width * mean endpoint height.
  widths = np.diff(xs)
  return np.sum(widths * (ys[1:] + ys[:-1])) / 2.0
def _printNPArray(x, precision=2):
  """Print the elements of a 1-D array on a single line, space-separated,
  each rendered with ``precision`` decimal places (Python 2 print syntax).
  """
  # NOTE: local name 'format' shadows the builtin; harmless in this helper.
  format = "%%.%df" % (precision)
  for elem in x:
    # Trailing comma suppresses the newline so elements share one line.
    print format % (elem),
  print
def _test():
  """
  This is a toy example, to show the basic functionality:

  The dataset is:

  actual prediction
  -------------------------
  0 0.1
  0 0.4
  1 0.5
  1 0.3
  1 0.45

  Some ROC terminology:
  A True Positive (TP) is when we predict TRUE and the actual value is 1.
  A False Positive (FP) is when we predict TRUE, but the actual value is 0.

  The True Positive Rate (TPR) is TP/P, where P is the total number of actual
  positives (3 in this example, the last 3 samples).
  The False Positive Rate (FPR) is FP/N, where N is the total number of actual
  negatives (2 in this example, the first 2 samples)

  Here are the classifications at various choices for the threshold. The
  prediction is TRUE if the predicted value is >= threshold and FALSE otherwise.

  actual pred 0.50 0.45 0.40 0.30 0.10
  ---------------------------------------------------------
  0 0.1 0 0 0 0 1
  0 0.4 0 0 1 1 1
  1 0.5 1 1 1 1 1
  1 0.3 0 0 0 1 1
  1 0.45 0 1 1 1 1

  TruePos(TP) 1 2 2 3 3
  FalsePos(FP) 0 0 1 1 2
  TruePosRate(TPR) 1/3 2/3 2/3 3/3 3/3
  FalsePosRate(FPR) 0/2 0/2 1/2 1/2 2/2

  The ROC curve is a plot of FPR on the x-axis and TPR on the y-axis. Basically,
  one can pick any operating point along this curve to run, the operating point
  determined by which threshold you want to use. By changing the threshold, you
  tradeoff TP's for FPs.

  The more area under this curve, the better the classification algorithm is.
  The AreaUnderCurve() function can be used to compute the area under this
  curve.

  """
  yTrue = np.array([0, 0, 1, 1, 1])
  yScore = np.array([0.1, 0.4, 0.5, 0.3, 0.45])
  # Compute the ROC points for the toy dataset described above.
  (fpr, tpr, thresholds) = ROCCurve(yTrue, yScore)

  print "Actual: ",
  _printNPArray(yTrue)
  print "Predicted: ",
  _printNPArray(yScore)
  print

  # Thresholds are reported highest-first to match the table in the docstring.
  print "Thresholds:",
  _printNPArray(thresholds[::-1])
  print "FPR(x): ",
  _printNPArray(fpr)
  print "TPR(y): ",
  _printNPArray(tpr)
  print

  area = AreaUnderCurve(fpr, tpr)
  print "AUC: ", area
# Run the self-test/demo when this module is executed as a script.
if __name__=='__main__':
  _test()
| 8,308 | Python | .py | 209 | 34.473684 | 80 | 0.590586 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,005 | mvn.py | numenta_nupic-legacy/src/nupic/math/mvn.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This module was disabled during the NuPIC 2 transition: importing it raises
# immediately, so every definition below is effectively dead code.
raise Exception("XERROR -- removing for NuPIC 2")
import numpy
from numpy.linalg import svd
# Precomputed log(2*pi), used by the Gaussian log-likelihood formulas below.
log2pi = numpy.log(2.0 * numpy.pi)
def cov(data, mean=None, prior=None, dfOffset=-1):
  """Covariance (scatter matrix divided by an adjusted count) of the rows
  of ``data``.

  :param data: 2-D array-like, one observation per row.
  :param mean: value subtracted before forming the scatter matrix.  Defaults
      to ``numpy.asmatrix(data).mean()``, the grand mean over ALL elements.
      NOTE(review): a per-column mean (``x.mean(0)``) would be the
      conventional choice -- confirm the scalar default is intended.
  :param prior: optional (n, SS) pair; the pseudo-count n is added to the
      sample count and the scatter matrix SS to the data's scatter.
  :param dfOffset: degrees-of-freedom correction added to the divisor
      (the default -1 gives an (n-1)-style denominator).
  :returns: covariance as a numpy matrix.
  """
  x = numpy.asmatrix(data)
  if mean is None: mean = x.mean()
  xc = x - mean
  xxt = xc.T * xc
  n = x.shape[0]
  if prior is not None:
    assert len(prior) == 2, "Must be of the form (n, SS)"
    n += prior[0]
    # FIX: removed a leftover Python 2 debug statement that printed
    # prior[1].shape to stdout on every call.
    xxt += prior[1]
  n += dfOffset
  assert n > 0
  return (1.0 / n) * xxt
def getRank(d, s):
  """Effective rank implied by the singular-value vector ``s``.

  Returns the full dimension ``d`` when every singular value is positive;
  otherwise the index of the first nonpositive one, floored at 1.
  """
  if numpy.min(s) > 0:
    return d
  return max(numpy.argmin(s > 0), 1)
class mvn(object):
  """Multivariate normal distribution backed by an SVD of its covariance.

  The SVD (u, s, vt) supports rank truncation (``limitRank``/``setRank``),
  pseudo-inverses, matrix square roots, whitening and log-likelihood
  evaluation, all restricted to the current ``rank`` leading singular values.
  Data is handled in row form: one observation per row.
  """

  def __init__(self, mean, varcov):
    # mean may be None, in which case centering is a no-op.
    if mean is not None: self.mean = numpy.asarray(mean)
    else: self.mean = None
    varcov = numpy.asmatrix(varcov)
    self.d = varcov.shape[1]
    self.varcov = varcov
    # Thin SVD of the covariance; rank defaults to the count of positive
    # singular values (see getRank).
    self.u, self.s, self.vt = svd(varcov, full_matrices=0, compute_uv=1)
    self.rank = getRank(self.d, self.s)

  def __str__(self):
    return "Mean:\n" + str(self.mean) + "\nCovariance:\n" + str(self.varcov)

  def __repr__(self):
    return "Mean:\n" + repr(self.mean) + "\nCovariance:\n" + repr(self.varcov)

  def limitRank(self, minvar):
    """Truncate the rank to the singular values exceeding ``minvar``."""
    if numpy.min(self.s) > minvar: self.rank = self.d
    else:
      # Index of the first singular value <= minvar, floored at 1.
      self.rank = max(numpy.argmin(self.s > minvar), 1)

  def setRank(self, rank):
    """Force the effective rank (must lie in [1, d])."""
    assert rank <= self.d
    assert rank >= 1
    self.rank = rank

  def s0(self):
    """Singular values with everything beyond the current rank zeroed."""
    s = numpy.zeros(len(self.s))
    s[0:self.rank] = self.s[0:self.rank]
    s[self.rank:] = 0
    return s

  def si(self):
    """Reciprocal singular values (pseudo-inverse spectrum): 1/s within the
    rank, 0 beyond it."""
    si = numpy.zeros(len(self.s))
    si[0:self.rank] = 1.0 / self.s[0:self.rank]
    si[self.rank:] = 0
    return si

  def sigma(self):
    """Rank-truncated reconstruction of the covariance: u * diag(s0) * vt."""
    return self.u * numpy.asmatrix(numpy.diag(self.s0())) * self.vt

  def sigmai(self):
    """Pseudo-inverse of the covariance: u * diag(si) * vt."""
    # return self.vt.T * numpy.asmatrix(numpy.diag(self.si())) * self.u.T
    return self.u * numpy.asmatrix(numpy.diag(self.si())) * self.vt

  def rightRoot(self):
    """Right factor R with sigma == R.T * R ... via diag(sqrt(s0)) * vt."""
    return numpy.asmatrix(numpy.diag(numpy.sqrt(self.s0()))) * self.vt

  def leftRoot(self):
    """Left factor u * diag(sqrt(s0))."""
    return self.u * numpy.asmatrix(numpy.diag(numpy.sqrt(self.s0())))

  def leftInvRoot(self):
    """Left pseudo-inverse root u * diag(sqrt(si)); used for whitening."""
    # return self.vt.T * numpy.asmatrix(numpy.diag(numpy.sqrt(self.si())))
    return self.u * numpy.asmatrix(numpy.diag(numpy.sqrt(self.si())))

  def rightInvRoot(self):
    """Right pseudo-inverse root diag(sqrt(si)) * vt."""
    # return numpy.asmatrix(numpy.diag(numpy.sqrt(self.si()))) * self.u.T
    return numpy.asmatrix(numpy.diag(numpy.sqrt(self.si()))) * self.vt

  def sample(self, r=None, n=1):
    """Draw ``n`` samples (rows) by coloring standard normal variates."""
    if r is None: r = numpy.random
    z = r.normal(0, 1, (n, self.d))
    return z * self.rightRoot() + self.mean

  def center(self, x):
    """Subtract the mean from the rows of x (no-op when mean is None)."""
    x = numpy.asmatrix(x)
    assert x.shape[1] == self.d
    if self.mean is not None: return (x - self.mean)
    else: return x

  def whiten(self, x):
    """Map x into the space where the distribution is standard normal."""
    xc = self.center(x)
    # Whiten.
    z = xc * self.leftInvRoot()
    return z

  def z2(self, x):
    """Squared Mahalanobis-style distance per row of x."""
    z = self.whiten(x)
    # Avoid matrix multiplication, just square the rows.
    z = numpy.asarray(z)
    z2 = z * z
    return numpy.sum(z2, axis=1)

  def logDetSigma(self):
    """Log pseudo-determinant: sum of logs of the in-rank singular values."""
    return numpy.sum(numpy.log(self.s[0:self.rank]))

  def logDetInvRoot(self):
    return -0.5 * self.logDetSigma()

  def logK(self):
    """Log of the Gaussian normalizing constant's (2*pi)^(-rank/2) part."""
    return -0.5 * self.rank * log2pi

  def logLikelihood(self, x):
    """Total log-likelihood of all rows of x."""
    z = numpy.asarray(self.whiten(x))
    # z2 = numpy.sum(z * z, axis=1)
    n = len(z)
    return -0.5 * ( n*(self.rank * log2pi + self.logDetSigma()) + numpy.sum(z*z) )

  def logLikelihoods(self, x):
    """Per-row log-likelihoods of x."""
    z2 = self.z2(x)
    return self.logK() + self.logDetInvRoot() - 0.5 * z2

  def logDensity(self, x):
    # Alias for logLikelihood, matching the Distribution-style interface.
    return self.logLikelihood(x)
class MaskBuilder(object):
  """Builds boolean masks via indexing syntax: ``MaskBuilder(d)[sel]``
  returns a length-d boolean array that is True exactly at the positions
  selected by ``sel`` (an integer, slice, or fancy index)."""

  def __init__(self, d):
    self.indices = numpy.arange(d)

  def __getitem__(self, *args):
    # FIX: the original returned the undefined name 'bits' (a guaranteed
    # NameError).  Build the mask the same way ConditionalMVN.__getitem__
    # does: mark the selected positions True.
    bits = numpy.zeros(len(self.indices), dtype=bool)
    bits[self.indices.__getitem__(*args)] = True
    return bits
class ConditionalMVN(object):
  """Factory for conditional distributions of a multivariate normal.

  Indexing an instance with the positions of the conditioned-on ("given")
  variables returns an ``Impl`` that can produce the conditional mvn of the
  remaining (free) variables for a concrete given-vector.
  """

  def __init__(self, mvn):
    self.mvn = mvn

  class Impl(object):
    """Conditional MVN with the partitioned moments precomputed."""

    def __init__(self, mean1, mean2, Sigma11, Sigma12, Sigma22):
      Sigma11 = numpy.asmatrix(Sigma11)
      Sigma12 = numpy.asmatrix(Sigma12)
      Sigma22 = numpy.asmatrix(Sigma22)
      # Pseudo-inverse of Sigma22 via SVD, zeroing null directions.
      u22, s22, vt22 = svd(Sigma22, full_matrices=0, compute_uv=1)
      rank22 = getRank(Sigma22.shape[1], s22)
      s22i = numpy.zeros(len(s22))
      s22i[0:rank22] = 1.0 / s22[0:rank22]
      # Rest are zeroes.
      Sigma22i = u22 * numpy.asmatrix(numpy.diag(s22i)) * vt22
      self.mean1 = mean1
      self.mean2 = mean2
      self.Sigma11 = Sigma11
      self.Sigma12 = Sigma12
      self.Sigma22i = Sigma22i

    def getDistribution(self, given):
      """Conditional mvn of the free variables given the vector ``given``."""
      given_from_mean = given - self.mean2
      # Keep means in row form.
      # mean = self.mean1 + self.Sigma12 * self.Sigma22i * given_from_mean
      mean = self.mean1 + given_from_mean * self.Sigma22i * self.Sigma12.transpose()
      varcov = self.Sigma11 - (self.Sigma12 * self.Sigma22i * (self.Sigma12.transpose()))
      return mvn(mean, varcov)

    def logDensity(self, x, given):
      """Log-density of ``x`` under the conditional distribution."""
      # FIX: 'getDistribution' was called as a bare name, which raised a
      # NameError; it is a method and must be reached through self.
      return self.getDistribution(given).logDensity(x)

  def __getitem__(self, *args):
    d = self.mvn.d
    indices = numpy.arange(d).__getitem__(*args)
    bits = numpy.repeat(False, d)
    bits[indices] = True
    givenMask = bits  # Should it be this way, or the other way around?
    varMask = ~givenMask
    # Partition the covariance into given/free quadrants.
    C22 = self.mvn.varcov[givenMask, ...][..., givenMask]
    C12 = self.mvn.varcov[varMask, ...][..., givenMask]
    C11 = self.mvn.varcov[varMask, ...][..., varMask]
    return ConditionalMVN.Impl(self.mvn.mean[varMask], self.mvn.mean[givenMask],
                               C11, C12, C22)
| 6,412 | Python | .py | 168 | 33.809524 | 89 | 0.646518 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,006 | __init__.py | numenta_nupic-legacy/src/nupic/math/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This package contains modules related to mathematical, probabilistic and
statistical data structures and simple algorithms.
"""
import sys
import math as coremath # The core Python module.
# bitstringToArray/CMultiArgMax are not part of NuPIC2
from nupic.bindings.math import (GetNTAReal,
GetNumpyDataType,
SparseMatrix, SparseTensor,
TensorIndex, Domain)
from nupic.bindings.math import lgamma, erf
def choose(n, c):
  """Binomial coefficient C(n, c), computed through log-gamma space and
  rounded back to the nearest integer."""
  value = coremath.exp(logChoose(n, c))
  return int(round(value))
def logChoose(n, c):
  """Natural log of the binomial coefficient C(n, c), via lgamma."""
  return lgamma(n+1) - lgamma(c+1) - lgamma(n-c+1)
# __all__ affects what symbols match "*"
# set __all__ so that "from math import *" doesn't clobber "sys"
__all__ = [
  "GetNTAReal", "GetNumpyDataType",
  "SparseMatrix", "SparseTensor", "TensorIndex", "Domain", "choose", "logChoose"]
# NOTE(review): the names below are neither defined nor imported in this
# module (the import comment above says bitstringToArray/CMultiArgMax are not
# part of NuPIC2).  With them in __all__, "from nupic.math import *" would
# fail with an AttributeError -- confirm whether they should be removed.
__all__.extend(["CMultiArgMax", "bitstringToArray",
  "pickByDistribution", "ConditionalProbabilityTable2D", "MultiIndicator", "Indicator"])
| 2,015 | Python | .py | 43 | 42.930233 | 90 | 0.672273 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,007 | logarithms.py | numenta_nupic-legacy/src/nupic/math/logarithms.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
def similar(a, b, eps=0.001):
  """Elementwise closeness test: true iff |a - b| < eps everywhere."""
  delta = numpy.abs(a - b)
  return (delta < eps).all()
def lscsum(lx, epsilon=None):
  """
  Accepts log-values as input, exponentiates them, computes the sum,
  then converts the sum back to log-space and returns the result.

  Handles underflow by rescaling so that the largest values is exactly 1.0.

  :param lx: array of log-space values.
  :param epsilon: optional floor; if the largest log-value is below it,
      epsilon itself is returned instead of the true log-sum.
  """
  lx = numpy.asarray(lx)
  base = lx.max()

  # If the input is the log of 0's, catch this condition before we generate
  # an exception, and return the log(0)
  if numpy.isinf(base):
    return base

  # If the user specified an epsilon and we are below it, return epsilon
  if (epsilon is not None) and (base < epsilon):
    return epsilon

  # Factor out the maximum so the largest exponentiated term is exactly 1.0,
  # then undo the shift in log space.  (A large block of commented-out debug
  # code that re-verified this against the naive sum was removed.)
  x = numpy.exp(lx - base)
  ssum = x.sum()
  result = numpy.log(ssum) + base
  return result
def lscsum0(lx):
  """
  Accepts log-values as input, exponentiates them, sums down the rows
  (first dimension), then converts the sum back to log-space and returns the result.

  Handles underflow by rescaling so that the largest values is exactly 1.0.
  """
  # rows = lx.shape[0]
  # columns = numpy.prod(lx.shape[1:])
  # lx = lx.reshape(rows, columns)
  # bases = lx.max(1).reshape(rows, 1)
  # bases = lx.max(0).reshape((1,) + lx.shape[1:])
  lx = numpy.asarray(lx)
  bases = lx.max(0) # Don't need to reshape in the case of 0.
  # Shift by the per-column maxima so the largest exponentiated entry in each
  # column is exactly 1.0, then undo the shift after the log.
  x = numpy.exp(lx - bases)
  ssum = x.sum(0)
  result = numpy.log(ssum) + bases
  # NOTE(review): the verification below always recomputes the NAIVE
  # (unscaled) sum, which can itself overflow/underflow -- exactly the case
  # the scaled computation exists to avoid.  It only warns (Python 2 print
  # syntax) and never changes the returned value.
  try:
    conventional = numpy.log(numpy.exp(lx).sum(0))
    if not similar(result, conventional):
      if numpy.isinf(conventional).any() and not numpy.isinf(result).any():
        # print "Scaled log sum down axis 0 avoided underflow or overflow."
        pass
      else:
        import sys
        print >>sys.stderr, "Warning: scaled log sum down axis 0 did not match."
        print >>sys.stderr, "Scaled log result:"
        print >>sys.stderr, result
        print >>sys.stderr, "Conventional result:"
        print >>sys.stderr, conventional
  except FloatingPointError, e:
    # print "Scaled log sum down axis 0 avoided underflow or overflow."
    pass
  return result
def normalize(lx):
  """
  Accepts log-values as input, exponentiates them,
  normalizes and returns the result.

  Handles underflow by rescaling so that the largest values is exactly 1.0.

  :param lx: array of log-space values.
  :returns: probability-space array summing to 1.
  """
  lx = numpy.asarray(lx)
  # Shift by the maximum so the largest exponentiated value is exactly 1.0;
  # the shift cancels out in the normalization.
  base = lx.max()
  x = numpy.exp(lx - base)
  result = x / x.sum()
  # FIX: removed the self-check that recomputed numpy.exp(lx) WITHOUT
  # rescaling and asserted agreement -- the naive exponential overflows for
  # large inputs, which is precisely the case this function exists to handle,
  # so the assert crashed exactly when the rescaling mattered.
  return result
def nsum0(lx):
  """
  Accepts log-values as input, exponentiates them, sums down the rows
  (first dimension), normalizes and returns the result.

  Handles underflow by rescaling so that the largest values is exactly 1.0.

  :param lx: array of log-space values.
  :returns: normalized column sums (probability space, summing to 1).
  """
  lx = numpy.asarray(lx)
  # Shift by the global maximum; the shift cancels in the normalization.
  base = lx.max()
  x = numpy.exp(lx - base)
  ssum = x.sum(0)
  result = ssum / ssum.sum()
  # FIX: removed the self-check that recomputed the naive numpy.exp(lx)
  # sums and asserted agreement -- it overflows for large inputs, defeating
  # the purpose of the rescaled computation.
  return result
def lnsum0(lx):
  """
  Accepts log-values as input, exponentiates them, sums down the rows
  (first dimension), normalizes, then converts the sum back to
  log-space and returns the result.

  Handles underflow by rescaling so that the largest values is exactly 1.0.

  :param lx: array of log-space values.
  :returns: log of the normalized column sums.
  """
  lx = numpy.asarray(lx)
  # Shift by the global maximum; the shift cancels when normalizing, so no
  # correction is needed before taking the log.
  base = lx.max()
  x = numpy.exp(lx - base)
  ssum = x.sum(0)
  normalized = ssum / ssum.sum()
  # FIX: the original discarded its own intermediate sums, re-deriving the
  # normalized values via nsum0, and carried a self-check that recomputed
  # the naive exponentials (overflowing for large inputs).  Both removed;
  # the computation is now done once, in rescaled form.
  return numpy.log(normalized)
def logSumExp(A, B, out=None):
  """Elementwise log(exp(A) + exp(B)) for numpy arrays A and B.

  Evaluated per element as max + log1p(exp(min - max)), which avoids
  overflowing the naive exponentials.  ``out`` may supply a preallocated
  result array; the result is also returned.
  """
  if out is None:
    out = numpy.zeros(A.shape)
  aDominates = A >= B
  bDominates = numpy.logical_not(aDominates)
  out[aDominates] = A[aDominates] + numpy.log1p(numpy.exp(B[aDominates] - A[aDominates]))
  out[bDominates] = B[bDominates] + numpy.log1p(numpy.exp(A[bDominates] - B[bDominates]))
  return out
def logDiffExp(A, B, out=None):
  """Elementwise log(exp(A) - exp(B)) for numpy arrays A and B.

  Every element of A must be >= the corresponding element of B; otherwise
  an AssertionError is raised.  ``out`` may supply a preallocated result
  array; the result is also returned.
  """
  if out is None:
    out = numpy.zeros(A.shape)
  valid = A >= B
  assert valid.all(), "Values in the first array should be greater than the values in the second"
  # Factor out exp(A): log(exp(A) - exp(B)) = A + log(1 - exp(B - A)).
  out[valid] = A[valid] + numpy.log(1 - numpy.exp(B[valid] - A[valid]))
  return out
| 5,997 | Python | .py | 152 | 36.125 | 102 | 0.682389 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,008 | dist.py | numenta_nupic-legacy/src/nupic/math/dist.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This module was disabled (not available post-NuPIC 2): importing it raises
# immediately, so every definition below is effectively dead code.
raise Exception("XERROR dist not available")
import bisect
from nupic.math import lgamma, logChoose, choose, erf
import numpy
# Precomputed log(2*pi) for the Gaussian log-density expressions below.
log2pi = numpy.log(2.0 * numpy.pi)
def logFactorial(x):
  """Approximation to the log of the factorial function."""
  # log(x!) == lgamma(x + 1).
  return lgamma(x + 1.0)
class Distribution(object):
  """Abstract marker base class for the probability distributions below."""
  pass
class Binomial(Distribution):
  """Binomial(n, p): number of successes in n independent trials with
  per-trial success probability p."""

  def __init__(self, n, p):
    self.n = n
    self.p = p

  def logProbability(self, x):
    """Log PMF at x successes."""
    failures = self.n - x
    return (logChoose(self.n, x) + x * numpy.log(self.p) +
        failures * numpy.log(1.0 - self.p))

  def probability(self, x):
    """PMF at x successes."""
    failures = self.n - x
    return choose(self.n, x) * (self.p ** x) * (1.0 - self.p) ** failures

  def sample(self, rgen):
    """Draw one variate; returns (value, logProbability(value))."""
    x = rgen.binomial(self.n, self.p)
    return x, self.logProbability(x)
class DiscreteDistribution(Distribution):
  """Distribution over a finite key set, built from a {key: weight} mapping.

  Stores the keys, the (optionally normalized) PMF and its running-sum CDF,
  enabling O(log n) sampling via bisection.
  """

  def __init__(self, pmfIn, normalize=True):
    # Iterating the same (unmodified) dict repeatedly yields a consistent
    # order, so keys, keyMap and pmf stay aligned with each other.
    keys = list(pmfIn.keys())
    keyMap = dict((key, i) for i, key in enumerate(keys))
    pmf = numpy.array([pmfIn[key] for key in keys], dtype=float)
    if normalize:
      pmf *= (1.0 / pmf.sum())

    # Running sum turns the PMF into a CDF for sampling.
    cdf = numpy.cumsum(pmf)

    self.keys = keys
    self.keyMap = keyMap
    self.pmf = pmf
    self.cdf = cdf
    self.sum = self.cdf[-1]
    self.scale = 1.0 / self.sum

  def sample(self, rgen):
    """Generates a random sample from the discrete probability distribution
    and returns its value and the log of the probability of sampling that value.
    """
    target = rgen.uniform(0, self.sum)
    index = bisect.bisect(self.cdf, target)
    return self.keys[index], numpy.log(self.pmf[index])

  def probability(self, key):
    """Normalized probability of ``key`` (0 for unknown keys)."""
    if key in self.keyMap:
      return self.pmf[self.keyMap[key]] * self.scale
    return 0

  # Discrete distribution: density and probability coincide.
  density = probability

  def logDensity(self, key):
    """Log of probability(key); -inf for unknown keys."""
    if key in self.keyMap:
      return numpy.log(self.pmf[self.keyMap[key]] * self.scale)
    return -numpy.inf
class MultinomialDistribution(Distribution):
  """Multinomial distribution over the categories of an underlying
  (normalized) DiscreteDistribution."""

  def __init__(self, pmf):
    self.dist = DiscreteDistribution(pmf, normalize=True)

  def mean(self, n=1):
    """Expected per-category counts for n draws."""
    return n * self.dist.pmf

  def var(self, n=1):
    """Per-category variances for n draws."""
    p = self.dist.pmf
    return n * p * (1.0 - p)

  def cov(self, n=1):
    """Full covariance matrix for n draws."""
    p = self.dist.pmf
    offDiagonal = -numpy.outer(p, p)
    return n * (numpy.diag(p) + offDiagonal)

  def logProbability(self, distn):
    """Form of distribution must be an array of counts in order of self.keys."""
    counts = numpy.asarray(distn)
    total = counts.sum()
    logCoefficient = (logFactorial(total) -
        numpy.sum([logFactorial(k) for k in counts]))
    return logCoefficient + numpy.sum(counts * numpy.log(self.dist.pmf))

  def probability(self, distn):
    return numpy.exp(self.logProbability(distn))

  def density(self, distn):
    return numpy.exp(self.logProbability(distn))

  def logDensity(self, distn):
    return self.logProbability(distn)

  def sample(self, rgen, n=1):
    """Draw per-category counts for n trials."""
    return rgen.multinomial(n, self.dist.pmf)
class PoissonDistribution(Distribution):
  """Poisson distribution with fixed rate parameter lambda."""

  def __init__(self, lambdaParameter):
    self.lambdaParameter = lambdaParameter
    # Cached log(lambda), used by every density evaluation.
    self.logLambda = numpy.log(self.lambdaParameter)

  def sample(self, rgen):
    """Generates a random sample from the Poisson probability distribution and
    returns its value and the log of the probability of sampling that value.
    """
    x = rgen.poisson(self.lambdaParameter)
    return x, self.logDensity(x)

  def probability(self, x):
    """PMF at the nonnegative integer x."""
    return numpy.exp(self.logDensity(x))

  def logProbability(self, x):
    """Log PMF: -lambda + x*log(lambda) - log(x!)."""
    return -self.lambdaParameter + self.logLambda * x - logFactorial(x)

  def logDensity(self, x):
    """Identical to logProbability (discrete distribution)."""
    return -self.lambdaParameter + self.logLambda * x - logFactorial(x)

  def cdf(self, x):
    """P(X <= x) by direct summation; x must be a nonnegative integer
    (numpy.inf returns 1.0)."""
    if x == numpy.inf:
      return 1.0
    if x != int(x): raise RuntimeError("Invalid value.")
    total = 0.0
    for i in range(x + 1):
      total += self.probability(i)
    return total
class NormalDistribution(Distribution):
  """Univariate normal distribution with the given mean and standard
  deviation."""

  def __init__(self, mean, sd):
    self.mean = mean
    self.sd = sd
    self.var = sd * sd
    self.logVar = numpy.log(self.var)
    # Cached 1/variance so density evaluations avoid a division.
    self.precision = 1.0 / self.var

  def density(self, x):
    """Probability density at x."""
    return numpy.exp(self.logDensity(x))

  def logDensity(self, x):
    """Log density: -(log(2*pi) + log(var) + (x-mean)^2/var) / 2."""
    centered = x - self.mean
    return -0.5 * (log2pi + self.logVar + (centered * centered) * self.precision)

  def sample(self, rgen):
    """Draw one variate; returns (value, logDensity(value))."""
    x = rgen.normal(self.mean, self.sd)
    return x, self.logDensity(x)

  def cdf(self, x):
    """Cumulative distribution function, via the error function."""
    return 0.5 * (1.0 + erf((x - self.mean) / (self.sd * numpy.sqrt(2.0))))
class LogNormalDistribution(Distribution):
  """Log-normal distribution parameterized by the mean and standard
  deviation of the underlying normal (i.e. the moments of log(x))."""

  def __init__(self, normalMean, normalSD):
    self.mean = normalMean
    self.sd = normalSD
    self.var = normalSD * normalSD
    self.logVar = numpy.log(self.var)
    self.precision = 1.0 / self.var

  def setNormalMean(self, normalMean):
    """Re-center the underlying normal (used by proposal distributions)."""
    self.mean = normalMean

  def logDensity(self, x):
    """Log density at x > 0: the normal log-density of log(x), minus log(x)
    for the change of variables."""
    logx = numpy.log(x)
    centered = logx - self.mean
    return -0.5 * (log2pi + self.logVar + (centered * centered) * self.precision) - logx

  def sample(self, rgen):
    """Draw one variate; returns (value, logDensity(value))."""
    x = numpy.exp(rgen.normal(self.mean, self.sd))
    return x, self.logDensity(x)
class GammaDistribution(Distribution):
  """Gamma distribution with shape ``alpha`` and either a scale (theta) or a
  rate ``beta`` = 1/theta -- specify exactly one of the two."""

  def __init__(self, shape, scale=None, beta=None):
    """
    :param shape: shape parameter alpha.
    :param scale: scale parameter theta; mutually exclusive with beta.
    :param beta: rate parameter 1/theta; mutually exclusive with scale.
    :raises AssertionError: if both or neither of scale/beta are given.
    """
    self.alpha = shape
    if scale is not None:
      self.scale = scale
      self.beta = 1.0 / scale
      assert beta is None, "Specify exactly one of 'scale' or 'beta'."
    elif beta is not None:
      self.scale = 1.0 / beta
      self.beta = beta
    else:
      # FIX: the original asserted 'beta is None' here, which always passed
      # in this branch and deferred the failure to an obscure TypeError.
      assert False, "Specify exactly one of 'scale' or 'beta'."
    self.lgammaAlpha = lgamma(shape)
    # FIX: this previously read -numpy.log(scale), which raised a TypeError
    # whenever the distribution was constructed via 'beta' (the 'scale'
    # argument is None in that branch).  Use the resolved self.scale.
    self.logBeta = -numpy.log(self.scale)

  def logDensity(self, x):
    """Log density at x > 0:
    (alpha-1)*log(x) + alpha*log(beta) - beta*x - log(Gamma(alpha))."""
    return ((self.alpha - 1.0) * numpy.log(x) + self.alpha * self.logBeta
        - self.beta * x - self.lgammaAlpha)

  def sample(self, rgen):
    """Draw one variate; returns (value, logDensity(value))."""
    x = rgen.gamma(self.alpha, self.scale)
    return x, self.logDensity(x)
def logBeta(alpha):
  """Log of the multivariate beta function of the vector ``alpha``:
  sum(lgamma(a_i)) - lgamma(sum(a_i))."""
  alpha = numpy.asarray(alpha)
  perComponent = numpy.sum([lgamma(a) for a in alpha])
  return perComponent - lgamma(alpha.sum())
class DirichletDistribution(Distribution):
  """Dirichlet distribution over probability vectors, with concentration
  parameter vector alpha."""

  def __init__(self, alpha):
    self.alpha = numpy.asarray(alpha).astype(float)
    # Cached (alpha - 1) exponents and the log normalizing constant.
    self.alpha_1 = self.alpha - 1
    self.logZ = -logBeta(self.alpha)
    self.sum = self.alpha.sum()

  def mean(self):
    """Expected probability vector: alpha / sum(alpha)."""
    return self.alpha / self.sum

  def var(self):
    """Per-component variances."""
    alpha = self.alpha
    total = self.sum
    denominator = total * total * (total + 1)
    return alpha * (total - alpha) / denominator

  def cov(self):
    """Full covariance matrix."""
    offDiagonal = -numpy.outer(self.alpha, self.alpha)
    total = self.sum
    denominator = total * total * (total + 1)
    diagonal = self.alpha * total
    return (numpy.diag(diagonal) + offDiagonal) / denominator

  def logDensity(self, x):
    """Log density at the probability vector x."""
    return self.logZ + (self.alpha_1 * numpy.log(x)).sum()

  def sample(self, rgen):
    """Draw a probability vector via normalized gamma variates."""
    y = rgen.gamma(self.alpha, 1.0, size=len(self.alpha))
    return y / y.sum()
class BetaDistribution(Distribution):
  """Beta(alpha, beta), implemented as a two-component Dirichlet."""

  def __init__(self, alpha, beta):
    self.distn = DirichletDistribution((alpha, beta))

  def mean(self):
    """Expected value alpha / (alpha + beta)."""
    return self.distn.mean()[0]

  def var(self):
    """Variance of the first component."""
    return self.distn.var()[0]

  def logDensity(self, x):
    """Log density at x in (0, 1)."""
    return self.distn.logDensity((x, 1.0 - x))
| 8,248 | Python | .py | 216 | 33.916667 | 85 | 0.664996 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,009 | stats.py | numenta_nupic-legacy/src/nupic/math/stats.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Module of statistical data structures and functions used in learning algorithms
and for analysis of HTM network inputs and outputs.
"""
import random
import numpy
from nupic.bindings.math import GetNTAReal, SparseMatrix
# Floating-point element type used by this module's arrays, as reported by
# the NuPIC C++ bindings (presumably float32 or float64 -- build-dependent).
dtype = GetNTAReal()
def pickByDistribution(distribution, r=None):
  """
  Pick a value according to the provided distribution.

  Example:

  ::

    pickByDistribution([.2, .1])

  Returns 0 two thirds of the time and 1 one third of the time.

  :param distribution: Probability distribution. Need not be normalized.
  :param r: Instance of random.Random. Uses the system instance if one is
         not provided.
  """
  rng = random if r is None else r
  remaining = rng.uniform(0, sum(distribution))
  for index, weight in enumerate(distribution):
    if remaining <= weight:
      return index
    remaining -= weight
def Indicator(pos, size, dtype):
  """
  Returns an array of length size and type dtype that is everywhere 0,
  except in the index in pos.

  :param pos: (int) specifies the position of the one entry that will be set.
  :param size: (int) The total size of the array to be returned.
  :param dtype: The element type (compatible with NumPy array())
         of the array to be returned.
  :returns: array of length ``size`` and element type ``dtype``.
  """
  result = numpy.zeros(size, dtype=dtype)
  result[pos] = 1
  return result
def MultiArgMax(x):
  """
  Yield every index at which the maximum value of ``x`` occurs.

  Requires that x have a max() method, as x.max() (in the case of NumPy)
  is much faster than max(x). For a simpler, faster argmax when there is
  only a single maximum entry, or when knowing only the first index where
  the maximum occurs, call argmax() on a NumPy array.

  :param x: Any sequence that has a max() method.
  :returns: Generator over the indices where the max value occurs.
  """
  peak = x.max()
  return (index for index, value in enumerate(x) if value == peak)
def Any(sequence):
  """
  :param sequence: Any sequence whose elements can be evaluated as booleans.
  :returns: (bool) True if any element of the sequence is truthy.

  Implemented with the builtin ``any``, which short-circuits; the previous
  ``reduce``-based version consumed the entire sequence and depended on the
  Python 2-only builtin ``reduce``.
  """
  return any(sequence)
def All(sequence):
  """
  :param sequence: Any sequence whose elements can be evaluated as booleans.
  :returns: (bool) True if all elements of the sequence are truthy.

  Implemented with the builtin ``all``, which short-circuits; the previous
  ``reduce``-based version consumed the entire sequence and depended on the
  Python 2-only builtin ``reduce``.
  """
  return all(sequence)
def Product(sequence):
  """
  Returns the product of the elements of the sequence.

  Use numpy.prod() if the sequence is an array, as it will be faster.
  Remember that the product of many numbers may rapidly overflow or
  underflow the numeric precision of the computer.
  Use a sum of the logs of the sequence elements instead when precision
  should be maintained.

  :param sequence: Any sequence whose elements can be multiplied by their
         neighbors.
  :returns: A single value that is the product of all the sequence elements.
  :raises TypeError: on an empty sequence (same as ``reduce`` with no
         initializer).
  """
  # functools.reduce exists on both Python 2 and 3; the bare builtin
  # ``reduce`` previously used here was removed in Python 3.
  return functools.reduce(lambda x, y: x * y, sequence)
def MultiIndicator(pos, size, dtype):
  """
  Build an array of length ``size`` that is zero everywhere except at the
  index (or indices) given by ``pos``, which are set to one.

  :param pos: A single integer or sequence of integers that specify
         the position of ones to be set.
  :param size: The total size of the array to be returned.
  :param dtype: The element type (compatible with NumPy array())
         of the array to be returned.
  :returns: An array of length size and element type dtype.
  """
  result = numpy.zeros(size, dtype=dtype)
  # Normalize the scalar case to a one-element sequence, then set each slot.
  positions = pos if hasattr(pos, '__iter__') else [pos]
  for index in positions:
    result[index] = 1
  return result
def Distribution(pos, size, counts, dtype):
  """
  Build an array of length ``size`` that is zero everywhere except at the
  indices in ``pos``, which hold a distribution normalized from ``counts``.

  :param pos: A single integer or sequence of integers that specify
         the position of ones to be set.
  :param size: The total size of the array to be returned.
  :param counts: The number of times we have observed each index.
  :param dtype: The element type (compatible with NumPy array())
         of the array to be returned.
  :returns: An array of length size and element type dtype.
  """
  out = numpy.zeros(size, dtype=dtype)
  if hasattr(pos, '__iter__'):
    # Normalize the observation counts over just the included positions.
    norm = float(sum(counts[i] for i in pos))
    for i in pos:
      out[i] = counts[i] / norm
  else:
    # A scalar position degenerates to a one-hot indicator.
    out[pos] = 1
  return out
class ConditionalProbabilityTable2D(object):
  """
  Holds frequencies in a 2D grid of bins.
  Binning is not performed automatically by this class.
  Bin updates must be done one row at a time.
  Based on nupic::SparseMatrix which is a compressed sparse row matrix.
  Number of columns cannot be changed once set.
  Number of rows may be increased.
  Also maintains the row and column sumProp distributions.

  Constructor constructs a new empty histogram with no rows or columns.

  :param rowHint: if specified, ncols must be specified (though not vice versa)
  :param ncols: if specified, number of columns cannot be changed thereafter.
  """

  def __init__(self, rowHint=None, ncols=None):
    # hist_ holds the raw counts; rowSums_ / colSums_ are kept in sync by
    # updateRow() so inference can normalize without rescanning the matrix.
    self.hist_ = None
    self.rowSums_ = None
    self.colSums_ = None
    if ncols:
      if not rowHint: rowHint = 1
      assert dtype
      self.grow(rowHint, ncols)
    else: assert not rowHint
    # hack_ caches the "cleaned" CPD built by clean_outcpd(); None marks the
    # cache as stale (set on every mutation).
    self.hack_ = None

  def numRows(self):
    """Gets the number of rows in the histogram.

    :returns: Integer number of rows.
    """
    if self.hist_: return self.hist_.nRows()
    else: return 0

  def numColumns(self):
    """
    :return: (int) number of columns
    """
    if self.hist_: return self.hist_.nCols()
    else: return 0

  def grow(self, rows, cols):
    """
    Grows the histogram to have rows rows and cols columns.
    Must not have been initialized before, or already have the same
    number of columns.
    If rows is smaller than the current number of rows,
    does not shrink.
    Also updates the sizes of the row and column sums.

    :param rows: Integer number of rows.
    :param cols: Integer number of columns.
    """
    if not self.hist_:
      self.hist_ = SparseMatrix(rows, cols)
      self.rowSums_ = numpy.zeros(rows, dtype=dtype)
      self.colSums_ = numpy.zeros(cols, dtype=dtype)
      self.hack_ = None
    else:
      oldRows = self.hist_.nRows()
      oldCols = self.hist_.nCols()
      nextRows = max(oldRows, rows)
      nextCols = max(oldCols, cols)
      if (oldRows < nextRows) or (oldCols < nextCols):
        self.hist_.resize(nextRows, nextCols)
        if oldRows < nextRows:
          # Preserve existing row sums when growing; new entries start at 0.
          oldSums = self.rowSums_
          self.rowSums_ = numpy.zeros(nextRows, dtype=dtype)
          self.rowSums_[0:len(oldSums)] = oldSums
          self.hack_ = None
        if oldCols < nextCols:
          # Preserve existing column sums when growing.
          oldSums = self.colSums_
          self.colSums_ = numpy.zeros(nextCols, dtype=dtype)
          self.colSums_[0:len(oldSums)] = oldSums
          self.hack_ = None

  def updateRow(self, row, distribution):
    """
    Add distribution to row row.
    Distribution should be an array of probabilities or counts.

    :param row: Integer index of the row to add to.
        May be larger than the current number of rows, in which case
        the histogram grows.
    :param distribution: Array of length equal to the number of columns.
    """
    self.grow(row+1, len(distribution))
    self.hist_.axby(row, 1, 1, distribution)
    self.rowSums_[row] += distribution.sum()
    self.colSums_ += distribution
    self.hack_ = None # Clear out the cached inference.

  def inferRow(self, distribution):
    """
    Computes the sumProp probability of each row given the input probability
    of each column. Normalizes the distribution in each column on the fly.

    The semantics are as follows: If the distribution is P(col|e) where e is
    the evidence is col is the column, and the CPD represents P(row|col), then
    this calculates sum(P(col|e) P(row|col)) = P(row|e).

    :param distribution: Array of length equal to the number of columns.
    :returns: array of length equal to the number of rows.
    """
    # normalize over colSums_ because P(row|col) = P(col,row)/P(col)
    return self.hist_ * (distribution / self.colSums_)

  def inferRowEvidence(self, distribution):
    """
    Computes the probability of evidence given each row from the probability
    of evidence given each column. Essentially, this just means that it sums
    probabilities over (normalized) rows. Normalizes the distribution over
    each row on the fly.

    The semantics are as follows: If the distribution is P(e|col) where e is
    evidence and col is the column, and the CPD is of P(col|row), then this
    calculates sum(P(e|col) P(col|row)) = P(e|row).

    :param distribution: Array of length equal to the number of columns.
    :returns: array of length equal to the number of rows.
    """
    # normalize over rowSums_ because P(col|row) = P(col,row)/P(row).
    return (self.hist_ * distribution) / self.rowSums_

  def inferRowMaxProd(self, distribution):
    """
    For each row of the raw histogram, computes the maximum of the
    elementwise products with ``distribution`` (SparseMatrix.vecMaxProd).
    Unlike :meth:`inferRowCompat`, this uses the raw counts, not the
    cleaned CPD.

    :param distribution: Array of length equal to the number of columns.
    :returns: array of length equal to the number of rows.
    """
    return self.hist_.vecMaxProd(distribution)

  def inferRowCompat(self, distribution):
    """
    Equivalent to the category inference of zeta1.TopLevel.
    Computes the max_prod (maximum component of a component-wise multiply)
    between the rows of the histogram and the incoming distribution.
    May be slow if the result of clean_outcpd() is not valid.

    :param distribution: Array of length equal to the number of columns.
    :returns: array of length equal to the number of rows.
    """
    if self.hack_ is None:
      self.clean_outcpd()
    return self.hack_.vecMaxProd(distribution)

  def clean_outcpd(self):
    """Hack to act like clean_outcpd on zeta1.TopLevelNode.
    Take the max element in each column, set it to 1, and set all the
    other elements to 0.
    Only needed by inferRowCompat(), which calls it when an updateRow()
    has invalidated the cached result (``self.hack_``).
    """
    m = self.hist_.toDense()
    for j in xrange(m.shape[1]): # For each column.
      cmax = m[:,j].max()
      if cmax:
        # Binarize the column: 1 where the max occurs, 0 elsewhere.
        m[:,j] = numpy.array(m[:,j] == cmax, dtype=dtype)
    self.hack_ = SparseMatrix(0, self.hist_.nCols())
    for i in xrange(m.shape[0]):
      self.hack_.addRow(m[i,:])
def ShannonEntropy(x):
  """
  Shannon entropy (in nats) of the distribution obtained by normalizing
  ``x``. Zero-probability entries are dropped, following the convention
  0*log(0) == 0. If ``x`` sums to zero it is used unnormalized.

  :param x: sequence of non-negative counts or probabilities.
  :returns: (float) entropy in nats.
  """
  counts = numpy.asarray(x, dtype=float)
  total = counts.sum()
  probs = counts / total if total else counts
  assert (probs >= 0).all()
  nonzero = probs[probs != 0]
  return -numpy.dot(nonzero, numpy.log(nonzero))
def ShannonEntropyLog(lx):
  """
  Shannon entropy (in nats) of a distribution given as unnormalized
  log-probabilities. Computed in a numerically stable way by shifting by
  the maximum before exponentiating.

  :param lx: sequence of log-weights.
  :returns: (float) entropy in nats.
  """
  logs = numpy.asarray(lx)
  logs = logs - logs.max()
  weights = numpy.exp(logs)
  total = weights.sum()
  # Algebraically identical to -((dot/total) - log(total)).
  return numpy.log(total) - numpy.dot(weights, logs) / total
def DifferentialEntropy(mass, areas=1.0):
  """
  Differential entropy (in nats) of a piecewise-constant density: the
  discrete entropy of the normalized ``mass`` plus the expected log bin
  width given by ``areas``.

  NOTE(review): with the scalar default ``areas``, numpy.dot degenerates to
  an elementwise multiply and the result is an array, not a scalar — pass
  an array of bin widths for a scalar result. Confirm callers rely on this.
  """
  weights = numpy.asarray(mass, dtype=float)
  density = weights / weights.sum()
  discrete = -numpy.dot(density, numpy.log(density))
  return discrete + numpy.dot(density, numpy.log(areas))
#----------------------------------------
#Fuzzy k-means
def fuzzyKmeans(samples,fixCenter=None,iter=5,fuzzParam=1.5):
#Not actually k means yet just 3 means
if fixCenter is not None:
dMeans = [min(samples)+0.01 , fixCenter ,max(samples)-0.01]
else:
dMeans = [min(samples)+0.01 , mean(samples) ,max(samples)-0.01]
begDeg = map(None,numpy.zeros(len(samples)))
midDeg = map(None,numpy.zeros(len(samples)))
endDeg = map(None,numpy.zeros(len(samples)))
for j in range(iter):
for k in range(len(samples)):
pBeg = (1.0/(samples[k] - dMeans[2])**2)**(1.0/(fuzzParam-1))
pMid = (1.0/(samples[k] - dMeans[1])**2)**(1.0/(fuzzParam-1))
pEnd = (1.0/(samples[k] - dMeans[0])**2)**(1.0/(fuzzParam-1))
nmlz = pBeg + pMid + pEnd
begDeg[k] = pBeg/nmlz; midDeg[k] = pMid/nmlz; endDeg[k] = pEnd/nmlz
#Update means 0 and 2, the other should stay at zero! (Change this for general purpose k-means)
dMeans[0] = numpy.nansum((numpy.array(endDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(endDeg)**fuzzParam)
if fixCenter is None:
dMeans[1] = numpy.nansum((numpy.array(midDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(midDeg)**fuzzParam)
dMeans[2] = numpy.nansum((numpy.array(begDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(begDeg)**fuzzParam)
return dMeans
| 13,563 | Python | .py | 319 | 37.830721 | 134 | 0.686218 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,010 | topology.py | numenta_nupic-legacy/src/nupic/math/topology.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Tools to help with topology.
"""
import itertools
import numpy
def coordinatesFromIndex(index, dimensions):
  """
  Translate an index into coordinates, using the given coordinate system.

  Similar to ``numpy.unravel_index``.

  :param index: (int) The index of the point. The coordinates are expressed
         as a single index by using the dimensions as a mixed radix
         definition.
  :param dimensions: (list of ints) The coordinate system.
  :returns: (list) of coordinates of length ``len(dimensions)``.
  """
  coordinates = [0] * len(dimensions)

  shifted = index
  # Peel off digits from least- to most-significant dimension. range() and
  # the explicit floor-division ``//`` replace the Python 2-only xrange()
  # and the version-ambiguous ``/`` (identical for the documented int
  # inputs, and now correct on Python 3 as well).
  for i in range(len(dimensions) - 1, 0, -1):
    coordinates[i] = shifted % dimensions[i]
    shifted = shifted // dimensions[i]

  coordinates[0] = shifted

  return coordinates
def indexFromCoordinates(coordinates, dimensions):
  """
  Translate coordinates into an index, using the given coordinate system.

  Similar to ``numpy.ravel_multi_index``.

  :param coordinates: (list of ints) A list of coordinates of length
         ``len(dimensions)``.
  :param dimensions: (list of ints) The coordinate system.
  :returns: (int) The index of the point, treating ``dimensions`` as a
         mixed radix definition.
  """
  # Horner-style accumulation over the mixed-radix digits.
  index = 0
  for i, dimension in enumerate(dimensions):
    index = index * dimension + coordinates[i]
  return index
def neighborhood(centerIndex, radius, dimensions):
  """
  Get the points in the neighborhood of a point.

  A point's neighborhood is the n-dimensional hypercube with sides ranging
  [center - radius, center + radius], inclusive. For example, if there are two
  dimensions and the radius is 3, the neighborhood is 6x6. Neighborhoods are
  truncated when they are near an edge.

  This is designed to be fast. In C++ it's fastest to iterate through neighbors
  one by one, calculating them on-demand rather than creating a list of them.
  But in Python it's faster to build up the whole list in batch via a few calls
  to C code rather than calculating them on-demand with lots of calls to Python
  code.

  :param centerIndex: (int) The index of the point. The coordinates are
         expressed as a single index by using the dimensions as a mixed radix
         definition.
  :param radius: (int) The radius of this neighborhood about the
         ``centerIndex``.
  :param dimensions: (indexable sequence) The dimensions of the world outside
         this neighborhood.
  :returns: (numpy array) The points in the neighborhood, including
         ``centerIndex``.
  """
  centerPosition = coordinatesFromIndex(centerIndex, dimensions)

  intervals = []
  for i, dimension in enumerate(dimensions):
    # Clip the interval to the world's edges (no wrap-around).
    left = max(0, centerPosition[i] - radius)
    right = min(dimension - 1, centerPosition[i] + radius)
    # BUG FIX (portability): range() replaces the Python 2-only xrange().
    intervals.append(range(left, right + 1))

  coords = numpy.array(list(itertools.product(*intervals)))
  return numpy.ravel_multi_index(coords.T, dimensions)
def wrappingNeighborhood(centerIndex, radius, dimensions):
  """
  Like :meth:`neighborhood`, except that the neighborhood isn't truncated when
  it's near an edge. It wraps around to the other side.

  :param centerIndex: (int) The index of the point. The coordinates are
         expressed as a single index by using the dimensions as a mixed radix
         definition.
  :param radius: (int) The radius of this neighborhood about the
         ``centerIndex``.
  :param dimensions: (indexable sequence) The dimensions of the world outside
         this neighborhood.
  :returns: (numpy array) The points in the neighborhood, including
         ``centerIndex``.
  """
  centerPosition = coordinatesFromIndex(centerIndex, dimensions)

  intervals = []
  for i, dimension in enumerate(dimensions):
    left = centerPosition[i] - radius
    # Cap the width at the full dimension so a large radius wraps at most
    # once (each point appears only once in the interval).
    right = min(centerPosition[i] + radius,
                left + dimensions[i] - 1)
    # BUG FIX (portability): range() replaces the Python 2-only xrange().
    interval = [v % dimension for v in range(left, right + 1)]
    intervals.append(interval)

  coords = numpy.array(list(itertools.product(*intervals)))
  return numpy.ravel_multi_index(coords.T, dimensions)
| 5,477 | Python | .py | 116 | 42.646552 | 81 | 0.704601 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,011 | ModelRunner.py | numenta_nupic-legacy/src/nupic/swarming/ModelRunner.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import time
import logging
import os
import sys
import shutil
import StringIO
import threading
import traceback
from collections import deque
from nupic.swarming.hypersearch import regression
from nupic.swarming.hypersearch.error_codes import ErrorCodes
from nupic.database.client_jobs_dao import ClientJobsDAO
from nupic.frameworks.opf import helpers
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.opf_basic_environment import BasicPredictionLogger
from nupic.frameworks.opf.opf_utils import matchPatterns
from nupic.frameworks.opf.periodic import (PeriodicActivityMgr,
PeriodicActivityRequest)
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
from nupic.support.configuration import Configuration
from nupic.swarming.experiment_utils import InferenceElement
from nupic.swarming import utils
class OPFModelRunner(object):
  """This class runs a given OPF model."""

  # Class-level defaults; the real values are loaded from configuration in
  # __init__().

  # The minimum number of records that need to have been read for this model
  # to be a candidate for 'best model'
  _MIN_RECORDS_TO_BE_BEST = None

  # The number of points we look at when trying to figure out whether or not a
  # model has matured
  _MATURITY_NUM_POINTS = None

  # The maximum rate of change in the model's metric for it to be considered
  # 'mature'
  _MATURITY_MAX_CHANGE = None
  def __init__(self,
               modelID,
               jobID,
               predictedField,
               experimentDir,
               reportKeyPatterns,
               optimizeKeyPattern,
               jobsDAO,
               modelCheckpointGUID,
               logLevel=None,
               predictionCacheMaxRecords=None):
    """
    Parameters:
    -------------------------------------------------------------------------
    modelID:            ID for this model in the models table
    jobID:              ID for this hypersearch job in the jobs table
    predictedField:     Name of the input field for which this model is being
                        optimized
    experimentDir:      Directory path containing the experiment's
                        description.py script
    reportKeyPatterns:  list of items from the results dict to include in
                        the report. These can be regular expressions.
    optimizeKeyPattern: Which report item, if any, we will be optimizing for.
                        This can also be a regular expression, but is an error
                        if it matches more than one key from the experiment's
                        results.
    jobsDAO:            Jobs data access object - the interface to the
                        jobs database which has the model's table.
    modelCheckpointGUID:
                        A persistent, globally-unique identifier for
                        constructing the model checkpoint key. If None, then
                        don't bother creating a model checkpoint.
    logLevel:           override logging level to this value, if not None
    predictionCacheMaxRecords:
                        Maximum number of records for the prediction output
                        cache. Pass None for default value.
    """
    # -----------------------------------------------------------------------
    # Initialize class constants (from the nupic configuration)
    # -----------------------------------------------------------------------
    self._MIN_RECORDS_TO_BE_BEST = int(Configuration.get('nupic.hypersearch.bestModelMinRecords'))
    self._MATURITY_MAX_CHANGE = float(Configuration.get('nupic.hypersearch.maturityPctChange'))
    self._MATURITY_NUM_POINTS = int(Configuration.get('nupic.hypersearch.maturityNumPoints'))

    # -----------------------------------------------------------------------
    # Initialize instance variables
    # -----------------------------------------------------------------------
    self._modelID = modelID
    self._jobID = jobID
    self._predictedField = predictedField
    self._experimentDir = experimentDir
    self._reportKeyPatterns = reportKeyPatterns
    self._optimizeKeyPattern = optimizeKeyPattern
    self._jobsDAO = jobsDAO
    self._modelCheckpointGUID = modelCheckpointGUID
    self._predictionCacheMaxRecords = predictionCacheMaxRecords

    self._isMaturityEnabled = bool(int(Configuration.get('nupic.hypersearch.enableModelMaturity')))
    self._logger = logging.getLogger(".".join( ['com.numenta',
                       self.__class__.__module__, self.__class__.__name__]))

    self._optimizedMetricLabel = None
    self._reportMetricLabels = []

    # Our default completion reason
    self._cmpReason = ClientJobsDAO.CMPL_REASON_EOF

    if logLevel is not None:
      self._logger.setLevel(logLevel)

    # The manager object to compute the metrics for this model
    self.__metricMgr = None

    # Will be set to a new instance of OPFTaskDriver by __runTask()
    #self.__taskDriver = None

    # Current task control parameters. Will be set by __runTask()
    self.__task = None

    # Will be set to a new instance of PeriodicActivityManager by __runTask()
    self._periodic = None

    # Will be set to streamDef string by _runTask()
    self._streamDef = None

    # Will be set to new OpfExperiment instance by run()
    self._model = None

    # Will be set to new InputSource by __runTask()
    self._inputSource = None

    # 0-based index of the record being processed;
    # Initialized and updated by __runTask()
    self._currentRecordIndex = None

    # Interface to write predictions to a persistent storage
    self._predictionLogger = None

    # In-memory cache for predictions. Predictions are written here for speed
    # when they don't need to be written to a persistent store
    self.__predictionCache = deque()

    # Flag to see if this is the best model in the job (as determined by the
    # model chooser logic). This is essentially a cache of the value in the
    # ClientJobsDB
    self._isBestModel = False

    # Flag to see if there is a best model (not necessarily this one)
    # stored in the DB
    self._isBestModelStored = False

    # -----------------------------------------------------------------------
    # Flags for model cancelation/checkpointing
    # -----------------------------------------------------------------------

    # Flag to see if the job that this model is part of has been canceled
    self._isCanceled = False

    # Flag to see if model was killed, either by the model terminator or by the
    # hypersearch implementation (e.g. when a swarm is killed/matured)
    self._isKilled = False

    # Flag to see if the model is matured. In most cases, this means that we
    # should stop running the model. The only exception is if this model is the
    # best model for the job, in which case it should continue running.
    self._isMature = False

    # Event to see if interrupt signal has been sent
    self._isInterrupted = threading.Event()

    # -----------------------------------------------------------------------
    # Facilities for measuring model maturity
    # -----------------------------------------------------------------------
    # List of tuples, (iteration, metric), used to see if the model has
    # 'matured'
    self._metricRegression = regression.AveragePctChange(windowSize=self._MATURITY_NUM_POINTS)

    self.__loggedMetricPatterns = []
  def run(self):
    """ Runs the OPF Model: loads the experiment, wires up the input stream,
    builds the model and its metrics, then drives the main record loop.

    Parameters:
    -------------------------------------------------------------------------
    retval:  (completionReason, completionMsg)
              where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX
                equates.
    """
    # -----------------------------------------------------------------------
    # Load the experiment's description.py module
    descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
      self._experimentDir)
    expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)
    expIface.normalizeStreamSources()

    modelDescription = expIface.getModelDescription()
    self._modelControl = expIface.getModelControl()

    # -----------------------------------------------------------------------
    # Create the input data stream for this task
    streamDef = self._modelControl['dataset']

    from nupic.data.stream_reader import StreamReader
    readTimeout = 0

    self._inputSource = StreamReader(streamDef, isBlocking=False,
                                     maxTimeout=readTimeout)

    # -----------------------------------------------------------------------
    # Get field statistics from the input source
    fieldStats = self._getFieldStats()
    # -----------------------------------------------------------------------
    # Construct the model instance
    self._model = ModelFactory.create(modelDescription)
    self._model.setFieldStatistics(fieldStats)
    self._model.enableLearning()
    self._model.enableInference(self._modelControl.get("inferenceArgs", None))

    # -----------------------------------------------------------------------
    # Instantiate the metrics
    self.__metricMgr = MetricsManager(self._modelControl.get('metrics',None),
                                      self._model.getFieldInfo(),
                                      self._model.getInferenceType())

    self.__loggedMetricPatterns = self._modelControl.get("loggedMetrics", [])

    self._optimizedMetricLabel = self.__getOptimizedMetricLabel()
    self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,
                                             self._getMetricLabels())

    # -----------------------------------------------------------------------
    # Initialize periodic activities (e.g., for model result updates)
    self._periodic = self._initPeriodicActivities()

    # -----------------------------------------------------------------------
    # Create our top-level loop-control iterator
    numIters = self._modelControl.get('iterationCount', -1)

    # Are we asked to turn off learning for a certain # of iterations near the
    # end?
    learningOffAt = None
    iterationCountInferOnly = self._modelControl.get('iterationCountInferOnly', 0)
    if iterationCountInferOnly == -1:
      self._model.disableLearning()
    elif iterationCountInferOnly > 0:
      assert numIters > iterationCountInferOnly, "when iterationCountInferOnly " \
        "is specified, iterationCount must be greater than " \
        "iterationCountInferOnly."
      learningOffAt = numIters - iterationCountInferOnly

    self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)

    # -----------------------------------------------------------------------
    # Perform final operations for model
    self._finalize()

    return (self._cmpReason, None)
  def __runTaskMainLoop(self, numIters, learningOffAt=None):
    """ Main loop of the OPF Model Runner: feeds records to the model until an
    exit condition is met (kill/cancel/interrupt, maturity, EOF, or the
    iteration limit).

    Parameters:
    -----------------------------------------------------------------------
    numIters:       Maximum number of records to process (-1 means no limit;
                    run until EOF or another stop condition)
    learningOffAt:  If not None, learning is turned off when we reach this
                    iteration number
    """

    ## Reset sequence states in the model, so it starts looking for a new
    ## sequence
    self._model.resetSequenceStates()

    self._currentRecordIndex = -1
    while True:

      # If killed by a terminator, stop running
      if self._isKilled:
        break

      # If job stops or hypersearch ends, stop running
      if self._isCanceled:
        break

      # If the process is about to be killed, set as orphaned
      if self._isInterrupted.isSet():
        self.__setAsOrphaned()
        break

      # If model is mature, stop running ONLY IF we are not the best model
      # for the job. Otherwise, keep running so we can keep returning
      # predictions to the user
      if self._isMature:
        if not self._isBestModel:
          self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
          break
        else:
          self._cmpReason = self._jobsDAO.CMPL_REASON_EOF

      # Turn off learning?
      if learningOffAt is not None \
                and self._currentRecordIndex == learningOffAt:
        self._model.disableLearning()

      # Read input record. Note that any failure here is a critical JOB failure
      # and results in the job being immediately canceled and marked as
      # failed. The runModelXXX code in hypesearch.utils, if it sees an
      # exception of type utils.JobFailException, will cancel the job and
      # copy the error message into the job record.
      try:
        inputRecord = self._inputSource.getNextRecordDict()
        if self._currentRecordIndex < 0:
          # After the first (possibly blocking) read succeeds, bound
          # subsequent reads with a timeout.
          self._inputSource.setTimeout(10)
      except Exception, e:
        raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
                                     traceback.format_exc())

      if inputRecord is None:
        # EOF
        self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
        break

      if inputRecord:
        # Process input record
        self._currentRecordIndex += 1

        result = self._model.run(inputRecord=inputRecord)

        # Compute metrics.
        result.metrics = self.__metricMgr.update(result)
        # If there are None, use defaults. see MetricsManager.getMetrics()
        # TODO remove this when JAVA API server is gone
        if not result.metrics:
          result.metrics = self.__metricMgr.getMetrics()

        # Write the result to the output cache. Don't write encodings, if they
        # were computed
        if InferenceElement.encodings in result.inferences:
          result.inferences.pop(InferenceElement.encodings)
          result.sensorInput.dataEncodings = None
        self._writePrediction(result)

        # Run periodic activities
        self._periodic.tick()

        if numIters >= 0 and self._currentRecordIndex >= numIters-1:
          break

      else:
        # Input source returned an empty record.
        #
        # NOTE: This is okay with Stream-based Source (when it times out
        # waiting for next record), but not okay with FileSource, which should
        # always return either with a valid record or None for EOF.
        raise ValueError("Got an empty record from FileSource: %r" %
                         inputRecord)
  def _finalize(self):
    """Run final activities after a model has run. These include recording and
    logging the final score, updating the best-model bookkeeping, and closing
    the prediction and input streams."""

    self._logger.info(
      "Finished: modelID=%r; %r records processed. Performing final activities",
      self._modelID, self._currentRecordIndex + 1)

    # =========================================================================
    # Dump the experiment metrics at the end of the task
    # =========================================================================
    self._updateModelDBResults()

    # =========================================================================
    # Check if the current model is the best. Create a milestone if necessary
    # If the model has been killed, it is not a candidate for "best model",
    # and its output cache should be destroyed
    # =========================================================================
    if not self._isKilled:
      self.__updateJobResults()
    else:
      self.__deleteOutputCache(self._modelID)

    # =========================================================================
    # Close output stream, if necessary
    # =========================================================================
    if self._predictionLogger:
      self._predictionLogger.close()

    # =========================================================================
    # Close input stream, if necessary
    # =========================================================================
    if self._inputSource:
      self._inputSource.close()
def __createModelCheckpoint(self):
""" Create a checkpoint from the current model, and store it in a dir named
after checkpoint GUID, and finally store the GUID in the Models DB """
if self._model is None or self._modelCheckpointGUID is None:
return
# Create an output store, if one doesn't exist already
if self._predictionLogger is None:
self._createPredictionLogger()
predictions = StringIO.StringIO()
self._predictionLogger.checkpoint(
checkpointSink=predictions,
maxRows=int(Configuration.get('nupic.model.checkpoint.maxPredictionRows')))
self._model.save(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))
self._jobsDAO.modelSetFields(modelID,
{'modelCheckpointId':str(self._modelCheckpointGUID)},
ignoreUnchanged=True)
self._logger.info("Checkpointed Hypersearch Model: modelID: %r, "
"checkpointID: %r", self._modelID, checkpointID)
return
def __deleteModelCheckpoint(self, modelID):
"""
Delete the stored checkpoint for the specified modelID. This function is
called if the current model is now the best model, making the old model's
checkpoint obsolete
Parameters:
-----------------------------------------------------------------------
modelID: The modelID for the checkpoint to delete. This is NOT the
unique checkpointID
"""
checkpointID = \
self._jobsDAO.modelsGetFields(modelID, ['modelCheckpointId'])[0]
if checkpointID is None:
return
try:
shutil.rmtree(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))
except:
self._logger.warn("Failed to delete model checkpoint %s. "\
"Assuming that another worker has already deleted it",
checkpointID)
return
self._jobsDAO.modelSetFields(modelID,
{'modelCheckpointId':None},
ignoreUnchanged=True)
return
  def _createPredictionLogger(self):
    """
    Creates the model's PredictionLogger object, which is an interface to write
    model results to a permanent storage location.

    Side effect: sets self._predictionLogger; optionally registers logged
    metrics on it when metric patterns were configured.
    """
    # Write results to a file
    self._predictionLogger = BasicPredictionLogger(
      fields=self._model.getFieldInfo(),
      experimentDir=self._experimentDir,
      label = "hypersearch-worker",
      inferenceType=self._model.getInferenceType())
    # If the caller configured metric patterns, resolve them against the
    # currently-computed metric labels and tell the logger to record them.
    if self.__loggedMetricPatterns:
      metricLabels = self.__metricMgr.getMetricLabels()
      loggedMetrics = matchPatterns(self.__loggedMetricPatterns, metricLabels)
      self._predictionLogger.setLoggedMetrics(loggedMetrics)
def __getOptimizedMetricLabel(self):
""" Get the label for the metric being optimized. This function also caches
the label in the instance variable self._optimizedMetricLabel
Parameters:
-----------------------------------------------------------------------
metricLabels: A sequence of all the labels being computed for this model
Returns: The label for the metric being optmized over
"""
matchingKeys = matchPatterns([self._optimizeKeyPattern],
self._getMetricLabels())
if len(matchingKeys) == 0:
raise Exception("None of the generated metrics match the specified "
"optimization pattern: %s. Available metrics are %s" % \
(self._optimizeKeyPattern, self._getMetricLabels()))
elif len(matchingKeys) > 1:
raise Exception("The specified optimization pattern '%s' matches more "
"than one metric: %s" % (self._optimizeKeyPattern, matchingKeys))
return matchingKeys[0]
  def _getMetricLabels(self):
    """
    Returns: A list of labels that correspond to metrics being computed
    """
    # Thin delegation: the metrics manager owns the label list.
    return self.__metricMgr.getMetricLabels()
def _getFieldStats(self):
"""
Method which returns a dictionary of field statistics received from the
input source.
Returns:
fieldStats: dict of dicts where the first level is the field name and
the second level is the statistic. ie. fieldStats['pounds']['min']
"""
fieldStats = dict()
fieldNames = self._inputSource.getFieldNames()
for field in fieldNames:
curStats = dict()
curStats['min'] = self._inputSource.getFieldMin(field)
curStats['max'] = self._inputSource.getFieldMax(field)
fieldStats[field] = curStats
return fieldStats
  def _getMetrics(self):
    """ Protected function that can be overriden by subclasses. Its main purpose
    is to allow the the OPFDummyModelRunner to override this with deterministic
    values

    Returns: All the metrics being computed for this model
    """
    # Delegates to the metrics manager; subclasses may replace this entirely.
    return self.__metricMgr.getMetrics()
def _updateModelDBResults(self):
""" Retrieves the current results and updates the model's record in
the Model database.
"""
# -----------------------------------------------------------------------
# Get metrics
metrics = self._getMetrics()
# -----------------------------------------------------------------------
# Extract report metrics that match the requested report REs
reportDict = dict([(k,metrics[k]) for k in self._reportMetricLabels])
# -----------------------------------------------------------------------
# Extract the report item that matches the optimize key RE
# TODO cache optimizedMetricLabel sooner
metrics = self._getMetrics()
optimizeDict = dict()
if self._optimizeKeyPattern is not None:
optimizeDict[self._optimizedMetricLabel] = \
metrics[self._optimizedMetricLabel]
# -----------------------------------------------------------------------
# Update model results
results = json.dumps((metrics , optimizeDict))
self._jobsDAO.modelUpdateResults(self._modelID, results=results,
metricValue=optimizeDict.values()[0],
numRecords=(self._currentRecordIndex + 1))
self._logger.debug(
"Model Results: modelID=%s; numRecords=%s; results=%s" % \
(self._modelID, self._currentRecordIndex + 1, results))
return
  def __updateJobResultsPeriodic(self):
    """
    Periodic check to see if this is the best model. This should only have an
    effect if this is the *first* model to report its progress
    """
    # Once we know a best model is stored and it isn't us, nothing to do.
    if self._isBestModelStored and not self._isBestModel:
      return
    # Compare-and-swap loop against the jobs table 'results' field; retried
    # until our update wins or we discover a different best model.
    while True:
      jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
      if jobResultsStr is None:
        # No results stored yet -- we may be the first model to report.
        jobResults = {}
      else:
        # Some model (possibly us) already stored results.
        self._isBestModelStored = True
        if not self._isBestModel:
          return
        jobResults = json.loads(jobResultsStr)
      bestModel = jobResults.get('bestModel', None)
      bestMetric = jobResults.get('bestValue', None)  # NOTE: read but unused here
      isSaved = jobResults.get('saved', False)        # NOTE: read but unused here
      # If there is a best model, and it is not the same as the current model
      # we should wait till we have processed all of our records to see if
      # we are the the best
      if (bestModel is not None) and (self._modelID != bestModel):
        self._isBestModel = False
        return
      # Make sure prediction output stream is ready before we present our model
      # as "bestModel"; sometimes this takes a long time, so update the model's
      # timestamp to help avoid getting orphaned
      self.__flushPredictionCache()
      self._jobsDAO.modelUpdateTimestamp(self._modelID)
      metrics = self._getMetrics()
      jobResults['bestModel'] = self._modelID
      jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
      jobResults['metrics'] = metrics
      jobResults['saved'] = False
      newResults = json.dumps(jobResults)
      # Atomic update: only succeeds if nobody changed 'results' since we read.
      isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
                                                   fieldName='results',
                                                   curValue=jobResultsStr,
                                                   newValue=newResults)
      # Success, or someone else already wrote the identical payload.
      if isUpdated or (not isUpdated and newResults==jobResultsStr):
        self._isBestModel = True
        break
  def __checkIfBestCompletedModel(self):
    """
    Reads the current "best model" for the job and returns whether or not the
    current model is better than the "best model" stored for the job

    Returns: (isBetter, storedBest, origResultsStr)

    isBetter:
      True if the current model is better than the stored "best model"
    storedResults:
      A dict of the currently stored results in the jobs table record
    origResultsStr:
      The json-encoded string that currently resides in the "results" field
      of the jobs record (used to create atomicity)
    """
    jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
    if jobResultsStr is None:
      jobResults = {}
    else:
      jobResults = json.loads(jobResultsStr)
    isSaved = jobResults.get('saved', False)
    bestMetric = jobResults.get('bestValue', None)
    currentMetric = self._getMetrics()[self._optimizedMetricLabel]
    # We win when no best model has been saved yet, or when our (minimized)
    # metric beats the stored one. NOTE(review): Python 2 semantics -- when
    # bestMetric is None the comparison `currentMetric < None` is False, and
    # the `not isSaved` clause is what makes us the best; this would raise
    # under Python 3.
    self._isBestModel = (not isSaved) \
                or (currentMetric < bestMetric)
    return self._isBestModel, jobResults, jobResultsStr
  def __updateJobResults(self):
    """
    Check if this is the best model
    If so:
      1) Write its checkpoint
      2) Record this model as the best
      3) Delete the previous best's output cache
    Otherwise:
      1) Delete our output cache
    """
    isSaved = False
    # Compare-and-swap loop: re-evaluate "are we the best?" until our write to
    # the jobs table succeeds or we lose the race to another model.
    while True:
      self._isBestModel, jobResults, jobResultsStr = \
                                            self.__checkIfBestCompletedModel()
      # -----------------------------------------------------------------------
      # If the current model is the best:
      #   1) Save the model's predictions
      #   2) Checkpoint the model state
      #   3) Update the results for the job
      if self._isBestModel:
        # Save the current model and its results (only once, even if the CAS
        # below forces us around the loop again)
        if not isSaved:
          self.__flushPredictionCache()
          self._jobsDAO.modelUpdateTimestamp(self._modelID)
          self.__createModelCheckpoint()
          self._jobsDAO.modelUpdateTimestamp(self._modelID)
          isSaved = True
        # Now record the model as the best for the job
        prevBest = jobResults.get('bestModel', None)
        prevWasSaved = jobResults.get('saved', False)
        # If the current model is the best, it shouldn't already be checkpointed
        if prevBest == self._modelID:
          assert not prevWasSaved
        metrics = self._getMetrics()
        jobResults['bestModel'] = self._modelID
        jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
        jobResults['metrics'] = metrics
        jobResults['saved'] = True
        # Atomic write: only succeeds if 'results' is unchanged since our read.
        isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
                                                     fieldName='results',
                                                     curValue=jobResultsStr,
                                                     newValue=json.dumps(jobResults))
        if isUpdated:
          # We won: clean up the previous best model's artifacts.
          if prevWasSaved:
            self.__deleteOutputCache(prevBest)
            self._jobsDAO.modelUpdateTimestamp(self._modelID)
            self.__deleteModelCheckpoint(prevBest)
            self._jobsDAO.modelUpdateTimestamp(self._modelID)
          self._logger.info("Model %d chosen as best model", self._modelID)
          break
      # -----------------------------------------------------------------------
      # If the current model is not the best, delete its outputs
      else:
        # NOTE: we update model timestamp around these occasionally-lengthy
        # operations to help prevent the model from becoming orphaned
        self.__deleteOutputCache(self._modelID)
        self._jobsDAO.modelUpdateTimestamp(self._modelID)
        self.__deleteModelCheckpoint(self._modelID)
        self._jobsDAO.modelUpdateTimestamp(self._modelID)
        break
  def _writePrediction(self, result):
    """
    Writes the results of one iteration of a model. The results are written to
    this ModelRunner's in-memory cache unless this model is the "best model" for
    the job. If this model is the "best model", the predictions are written out
    to a permanent store via a prediction output stream instance

    Parameters:
    -----------------------------------------------------------------------
    result:      A opf_utils.ModelResult object, which contains the input and
                output for this iteration
    """
    # Always buffer first; the flush below drains the whole cache (including
    # this result) when we are the best model.
    self.__predictionCache.append(result)
    if self._isBestModel:
      self.__flushPredictionCache()
  def __writeRecordsCallback(self):
    """ This callback is called by self.__predictionLogger.writeRecords()
    between each batch of records it writes. It gives us a chance to say that
    the model is 'still alive' during long write operations.
    """
    # This updates the engLastUpdateTime of the model record so that other
    # worker's don't think that this model is orphaned.
    # NOTE(review): this calls modelUpdateResults with no results payload;
    # presumably that still bumps the timestamp -- confirm in ClientJobsDAO.
    self._jobsDAO.modelUpdateResults(self._modelID)
  def __flushPredictionCache(self):
    """
    Writes the contents of this model's in-memory prediction cache to a permanent
    store via the prediction output stream instance
    """
    # Nothing buffered (or cache already torn down) -- nothing to do.
    if not self.__predictionCache:
      return
    # Create an output store, if one doesn't exist already
    if self._predictionLogger is None:
      self._createPredictionLogger()
    startTime = time.time()
    # The callback keeps the model's DB timestamp fresh during long writes.
    self._predictionLogger.writeRecords(self.__predictionCache,
                                        progressCB=self.__writeRecordsCallback)
    self._logger.info("Flushed prediction cache; numrows=%s; elapsed=%s sec.",
                      len(self.__predictionCache), time.time() - startTime)
    # NOTE: cache is expected to support .clear() (e.g. a collections.deque).
    self.__predictionCache.clear()
  def __deleteOutputCache(self, modelID):
    """
    Deletes the output cache associated with the given modelID. This actually
    clears up the resources associated with the cache, rather than deleting all
    the records in the cache

    Parameters:
    -----------------------------------------------------------------------
    modelID:      The id of the model whose output cache is being deleted
    """
    # If this is our output, we should close the connection
    if modelID == self._modelID and self._predictionLogger is not None:
      self._predictionLogger.close()
      del self.__predictionCache
      # Null out both handles so later flushes become no-ops.
      self._predictionLogger = None
      self.__predictionCache = None
def _initPeriodicActivities(self):
""" Creates and returns a PeriodicActivityMgr instance initialized with
our periodic activities
Parameters:
-------------------------------------------------------------------------
retval: a PeriodicActivityMgr instance
"""
# Activity to update the metrics for this model
# in the models table
updateModelDBResults = PeriodicActivityRequest(repeating=True,
period=100,
cb=self._updateModelDBResults)
updateJobResults = PeriodicActivityRequest(repeating=True,
period=100,
cb=self.__updateJobResultsPeriodic)
checkCancelation = PeriodicActivityRequest(repeating=True,
period=50,
cb=self.__checkCancelation)
checkMaturity = PeriodicActivityRequest(repeating=True,
period=10,
cb=self.__checkMaturity)
# Do an initial update of the job record after 2 iterations to make
# sure that it is populated with something without having to wait too long
updateJobResultsFirst = PeriodicActivityRequest(repeating=False,
period=2,
cb=self.__updateJobResultsPeriodic)
periodicActivities = [updateModelDBResults,
updateJobResultsFirst,
updateJobResults,
checkCancelation]
if self._isMaturityEnabled:
periodicActivities.append(checkMaturity)
return PeriodicActivityMgr(requestedActivities=periodicActivities)
  def __checkCancelation(self):
    """ Check if the cancelation flag has been set for this model
    in the Model DB.

    Sets self._cmpReason plus self._isCanceled / self._isKilled according to
    the job's 'cancel' flag or the model's 'engStop' reason.
    """
    # Update a hadoop job counter at least once every 600 seconds so it doesn't
    # think our map task is dead
    print >>sys.stderr, "reporter:counter:HypersearchWorker,numRecords,50"
    # See if the job got cancelled
    jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
    if jobCancel:
      self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
      self._isCanceled = True
      self._logger.info("Model %s canceled because Job %s was stopped.",
                        self._modelID, self._jobID)
    else:
      # Job still running -- check for a model-level stop request instead
      stopReason = self._jobsDAO.modelsGetFields(self._modelID, ['engStop'])[0]
      if stopReason is None:
        # No stop requested; keep running
        pass
      elif stopReason == ClientJobsDAO.STOP_REASON_KILLED:
        self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
        self._isKilled = True
        self._logger.info("Model %s canceled because it was killed by hypersearch",
                          self._modelID)
      elif stopReason == ClientJobsDAO.STOP_REASON_STOPPED:
        self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
        self._isCanceled = True
        self._logger.info("Model %s stopped because hypersearch ended", self._modelID)
      else:
        raise RuntimeError ("Unexpected stop reason encountered: %s" % (stopReason))
  def __checkMaturity(self):
    """ Save the current metric value and see if the model's performance has
    'leveled off.' We do this by looking at some number of previous number of
    recordings """
    # Too few records processed to judge maturity yet
    if self._currentRecordIndex+1 < self._MIN_RECORDS_TO_BE_BEST:
      return
    # If we are already mature, don't need to check anything
    if self._isMature:
      return
    # Record the latest optimized-metric sample for the regression window
    metric = self._getMetrics()[self._optimizedMetricLabel]
    self._metricRegression.addPoint(x=self._currentRecordIndex, y=metric)
    # Perform a linear regression to see if the error is leveled off
    #pctChange = self._metricRegression.getPctChange()
    #if pctChange  is not None and abs(pctChange ) <= self._MATURITY_MAX_CHANGE:
    pctChange, absPctChange = self._metricRegression.getPctChanges()
    if pctChange  is not None and absPctChange <= self._MATURITY_MAX_CHANGE:
      # Metric has leveled off: flag maturity in the DB and stop this model
      self._jobsDAO.modelSetFields(self._modelID,
                                   {'engMatured':True})
      # TODO: Don't stop if we are currently the best model. Also, if we
      # are still running after maturity, we have to periodically check to
      # see if we are still the best model. As soon we lose to some other
      # model, then we should stop at that point.
      self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
      self._isMature = True
      self._logger.info("Model %d has matured (pctChange=%s, n=%d). \n"\
                        "Scores = %s\n"\
                        "Stopping execution",self._modelID, pctChange,
                                            self._MATURITY_NUM_POINTS,
                                            self._metricRegression._window)
  def handleWarningSignal(self, signum, frame):
    """
    Handles a "warning signal" from the scheduler. This is received when the
    scheduler is about to kill the the current process so that the worker can be
    allocated to another job.

    Right now, this function just sets the current model to the "Orphaned" state
    in the models table so that another worker can eventually re-run this model

    Parameters:
    -----------------------------------------------------------------------
    signum:  signal number delivered by the OS (unused here)
    frame:   current stack frame at delivery time (unused here)
    """
    # Only flag the interruption; the main loop reacts to this event and
    # performs the actual orphaning (see __setAsOrphaned).
    self._isInterrupted.set()
def __setAsOrphaned(self):
"""
Sets the current model as orphaned. This is called when the scheduler is
about to kill the process to reallocate the worker to a different process.
"""
cmplReason = ClientJobsDAO.CMPL_REASON_ORPHAN
cmplMessage = "Killed by Scheduler"
self._jobsDAO.modelSetCompleted(self._modelID, cmplReason, cmplMessage)
| 37,471 | Python | .py | 761 | 40.554534 | 99 | 0.614478 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,012 | api.py | numenta_nupic-legacy/src/nupic/swarming/api.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""External API for hypersearch-related functions."""
import json
import os
import shutil
import tempfile
from nupic.frameworks.opf import helpers
from nupic.database.client_jobs_dao import ClientJobsDAO
from nupic.support.configuration import Configuration
def createAndStartSwarm(client, clientInfo="", clientKey="", params="",
                        minimumWorkers=None, maximumWorkers=None,
                        alreadyRunning=False):
  """Create and start a swarm job.

  Args:
    client - A string identifying the calling client. There is a small limit
        for the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.
    clientInfo - JSON encoded dict of client specific information.
    clientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.
    params - JSON encoded dict of the parameters for the job. This can be
        fetched out of the database by the worker processes based on the jobID.
    minimumWorkers - The minimum workers to allocate to the swarm. Set to None
        to use the default.
    maximumWorkers - The maximum workers to allocate to the swarm. Set to None
        to use the swarm default. Set to 0 to use the maximum scheduler value.
    alreadyRunning - Insert a job record for an already running process. Used
        for testing.
  """
  # Fill in worker bounds from configuration when the caller didn't choose
  if minimumWorkers is None:
    minimumWorkers = Configuration.getInt(
        "nupic.hypersearch.minWorkersPerSwarm")
  if maximumWorkers is None:
    maximumWorkers = Configuration.getInt(
        "nupic.hypersearch.maxWorkersPerSwarm")

  dao = ClientJobsDAO.get()
  return dao.jobInsert(client=client,
                       cmdLine="$HYPERSEARCH",
                       clientInfo=clientInfo,
                       clientKey=clientKey,
                       alreadyRunning=alreadyRunning,
                       params=params,
                       minimumWorkers=minimumWorkers,
                       maximumWorkers=maximumWorkers,
                       jobType=ClientJobsDAO.JOB_TYPE_HS)
def getSwarmModelParams(modelID):
  """Retrieve the Engine-level model params from a Swarm model

  Args:
    modelID - Engine-level model ID of the Swarm model

  Returns:
    JSON-encoded string containing Model Params
  """
  # TODO: loadExperimentDescriptionScriptFromDir leaks pf_base_descriptionNN /
  # pf_descriptionNN module imports on every call, so a long-running caller
  # accumulates modules without bound. Running this logic in a subprocess
  # (multiprocessing) would avoid the leak.
  cjDAO = ClientJobsDAO.get()
  jobID, description = cjDAO.modelsGetFields(
      modelID,
      ["jobId", "genDescription"])
  baseDescription = cjDAO.jobGetFields(jobID, ["genBaseDescription"])[0]

  # Materialize base.py and description.py in a scratch directory so the OPF
  # helpers can load them as an experiment and expose the model params.
  scratchDir = tempfile.mkdtemp()
  try:
    with open(os.path.join(scratchDir, "base.py"), mode="wb") as f:
      f.write(baseDescription)
    with open(os.path.join(scratchDir, "description.py"), mode="wb") as f:
      f.write(description)

    expIface = helpers.getExperimentDescriptionInterfaceFromModule(
        helpers.loadExperimentDescriptionScriptFromDir(scratchDir))

    return json.dumps(
        dict(
            modelConfig=expIface.getModelDescription(),
            inferenceArgs=expIface.getModelControl().get("inferenceArgs", None)))
  finally:
    shutil.rmtree(scratchDir, ignore_errors=True)
| 4,681 | Python | .py | 99 | 42.767677 | 93 | 0.735861 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,013 | permutation_helpers.py | numenta_nupic-legacy/src/nupic/swarming/permutation_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporary file to keep the API consistent (especially all permutation.py files)
"""
from nupic.swarming.hypersearch.permutation_helpers import * | 1,126 | Python | .py | 24 | 45.875 | 79 | 0.684832 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,014 | utils.py | numenta_nupic-legacy/src/nupic/swarming/utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import json
import os
import sys
import tempfile
import logging
import re
import traceback
import StringIO
from collections import namedtuple
import pprint
import shutil
import types
import signal
import uuid
import validictory
from nupic.database.client_jobs_dao import (
ClientJobsDAO, InvalidConnectionException)
# TODO: Note the function 'rUpdate' is also duplicated in the
# nupic.data.dictutils module -- we will eventually want to change this
# TODO: 'ValidationError', 'validate', 'loadJSONValueFromFile' duplicated in
# nupic.data.jsonhelpers -- will want to remove later
class JobFailException(Exception):
  """ Raised by a model to signal a fatal, job-level failure. The runModelXXX
  code responds by canceling the job -- so every other worker exits
  immediately -- and marking the job as failed.
  """
  pass
def getCopyrightHead():
  """Return the standard NuPIC AGPL license header, formatted as a block of
  Python comment lines, for prepending to generated source files."""
  return """# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
def _paramsFileHead():
  """
  This is the first portion of every sub-experiment params file we generate. Between
  the head and the tail are the experiment specific options.
  """
  # Local renamed from `str` to avoid shadowing the builtin.
  head = getCopyrightHead() + \
"""
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
"""
  return head
def _paramsFileTail():
"""
This is the tail of every params file we generate. Between the head and the tail
are the experiment specific options.
"""
str = \
"""
}
mod = importBaseDescription('base.py', config)
locals().update(mod.__dict__)
"""
return str
def _appendReportKeys(keys, prefix, results):
"""
Generate a set of possible report keys for an experiment's results.
A report key is a string of key names separated by colons, each key being one
level deeper into the experiment results dict. For example, 'key1:key2'.
This routine is called recursively to build keys that are multiple levels
deep from the results dict.
Parameters:
-----------------------------------------------------------
keys: Set of report keys accumulated so far
prefix: prefix formed so far, this is the colon separated list of key
names that led up to the dict passed in results
results: dictionary of results at this level.
"""
allKeys = results.keys()
allKeys.sort()
for key in allKeys:
if hasattr(results[key], 'keys'):
_appendReportKeys(keys, "%s%s:" % (prefix, key), results[key])
else:
keys.add("%s%s" % (prefix, key))
class _BadKeyError(Exception):
  """ Raised when a report-key regular expression is invalid or does not match
  any of an experiment's available report keys (see _matchReportKeys). The
  original docstring here was a copy-paste of JobFailException's and did not
  describe this class.
  """
  pass
def _matchReportKeys(reportKeyREs=[], allReportKeys=[]):
"""
Extract all items from the 'allKeys' list whose key matches one of the regular
expressions passed in 'reportKeys'.
Parameters:
----------------------------------------------------------------------------
reportKeyREs: List of regular expressions
allReportKeys: List of all keys
retval: list of keys from allReportKeys that match the regular expressions
in 'reportKeyREs'
If an invalid regular expression was included in 'reportKeys',
then BadKeyError() is raised
"""
matchingReportKeys = []
# Extract the report items of interest
for keyRE in reportKeyREs:
# Find all keys that match this regular expression
matchObj = re.compile(keyRE)
found = False
for keyName in allReportKeys:
match = matchObj.match(keyName)
if match and match.end() == len(keyName):
matchingReportKeys.append(keyName)
found = True
if not found:
raise _BadKeyError(keyRE)
return matchingReportKeys
def _getReportItem(itemName, results):
"""
Get a specific item by name out of the results dict.
The format of itemName is a string of dictionary keys separated by colons,
each key being one level deeper into the results dict. For example,
'key1:key2' would fetch results['key1']['key2'].
If itemName is not found in results, then None is returned
"""
subKeys = itemName.split(':')
subResults = results
for subKey in subKeys:
subResults = subResults[subKey]
return subResults
def filterResults(allResults, reportKeys, optimizeKey=None):
  """ Given the complete set of results generated by an experiment (passed in
  'results'), filter out and return only the ones the caller wants, as
  specified through 'reportKeys' and 'optimizeKey'.

  A report key is a string of key names separated by colons, each key being one
  level deeper into the experiment results dict. For example, 'key1:key2'.

  Parameters:
  -------------------------------------------------------------------------
  results:             dict of all results generated by an experiment
  reportKeys:          list of items from the results dict to include in
                       the report. These can be regular expressions.
  optimizeKey:         Which report item, if any, we will be optimizing for. This can
                       also be a regular expression, but is an error if it matches
                       more than one key from the experiment's results.
  retval:  (reportDict, optimizeDict)
              reportDict: a dictionary of the metrics named by desiredReportKeys
              optimizeDict: A dictionary containing 1 item: the full name and
                    value of the metric identified by the optimizeKey
  """
  # Init return values
  optimizeDict = dict()
  # Get all available report key names for this experiment
  allReportKeys = set()
  _appendReportKeys(keys=allReportKeys, prefix='', results=allResults)
  #----------------------------------------------------------------------------
  # Extract the report items that match the regular expressions passed in reportKeys
  matchingKeys = _matchReportKeys(reportKeys, allReportKeys)
  # Extract the values of the desired items
  reportDict = dict()
  for keyName in matchingKeys:
    value = _getReportItem(keyName, allResults)
    reportDict[keyName] = value
  # -------------------------------------------------------------------------
  # Extract the report item that matches the regular expression passed in
  # optimizeKey; it must resolve to exactly one metric.
  if optimizeKey is not None:
    matchingKeys = _matchReportKeys([optimizeKey], allReportKeys)
    if len(matchingKeys) == 0:
      raise _BadKeyError(optimizeKey)
    elif len(matchingKeys) > 1:
      # NOTE(review): _BadOptimizeKeyError is not defined in this portion of
      # the module -- presumably declared elsewhere in the file; confirm it
      # exists, otherwise this branch raises NameError.
      raise _BadOptimizeKeyError(optimizeKey, matchingKeys)
    optimizeKeyFullName = matchingKeys[0]
    # Get the value of the optimize metric
    value = _getReportItem(optimizeKeyFullName, allResults)
    optimizeDict[optimizeKeyFullName] = value
    # The optimized metric is also included in the report dict
    reportDict[optimizeKeyFullName] = value
  # Return info
  return(reportDict, optimizeDict)
def _quoteAndEscape(string):
  """
  string:  input string (ascii or unicode)

  Returns: a quoted string with characters that are represented in python via
           escape sequences converted to those escape sequences
  """
  # Python 2 only: types.StringTypes == (str, unicode)
  assert type(string) in types.StringTypes
  # pformat() of a bare string yields its quoted repr, with escapes applied
  return pprint.pformat(string)
def _handleModelRunnerException(jobID, modelID, jobsDAO, experimentDir, logger,
                                e):
  """ Perform standard handling of an exception that occurs while running
  a model.

  Parameters:
  -------------------------------------------------------------------------
  jobID:                ID for this hypersearch job in the jobs table
  modelID:              model ID
  jobsDAO:              ClientJobsDAO instance
  experimentDir:        directory containing the experiment
  logger:               the logger to use
  e:                    the exception that occurred

  retval:               (completionReason, completionMsg)
  """
  # Build a completion message containing the exception and its traceback
  msg = StringIO.StringIO()
  print >>msg, "Exception occurred while running model %s: %r (%s)" % (
    modelID, e, type(e))
  traceback.print_exc(None, msg)
  completionReason = jobsDAO.CMPL_REASON_ERROR
  completionMsg = msg.getvalue()
  logger.error(completionMsg)
  # Write results to the model database for the error case. Ignore
  # InvalidConnectionException, as this is usually caused by orphaned models
  #
  # TODO: do we really want to set numRecords to 0? Last updated value might
  #       be useful for debugging
  if type(e) is not InvalidConnectionException:
    jobsDAO.modelUpdateResults(modelID,  results=None, numRecords=0)
  # TODO: Make sure this wasn't the best model in job. If so, set the best
  # appropriately
  # If this was an exception that should mark the job as failed, do that
  # now.
  if type(e) == JobFailException:
    workerCmpReason = jobsDAO.jobGetFields(jobID,
        ['workerCompletionReason'])[0]
    # Only the first failure transitions the job out of SUCCESS; later
    # failures leave the original cancel reason/message in place.
    if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
      jobsDAO.jobSetFields(jobID, fields=dict(
          cancel=True,
          workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
          workerCompletionMsg = ": ".join(str(i) for i in e.args)),
          useConnectionID=False,
          ignoreUnchanged=True)
  return (completionReason, completionMsg)
def runModelGivenBaseAndParams(modelID, jobID, baseDescription, params,
              predictedField, reportKeys, optimizeKey, jobsDAO,
              modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None):
  """ This creates an experiment directory with a base.py description file
  created from 'baseDescription' and a description.py generated from the
  given params dict and then runs the experiment.

  Parameters:
  -------------------------------------------------------------------------
  modelID:              ID for this model in the models table
  jobID:                ID for this hypersearch job in the jobs table
  baseDescription:      Contents of a description.py with the base experiment
                        description
  params:               Dictionary of specific parameters to override within
                        the baseDescriptionFile.
  predictedField:       Name of the input field for which this model is being
                        optimized
  reportKeys:           Which metrics of the experiment to store into the
                        results dict of the model's database entry
  optimizeKey:          Which metric we are optimizing for
  jobsDAO:              Jobs data access object - the interface to the
                        jobs database which has the model's table.
  modelCheckpointGUID:  A persistent, globally-unique identifier for
                        constructing the model checkpoint key
  logLevel:             override logging level to this value, if not None
  predictionCacheMaxRecords:
                        Maximum number of records for the prediction output
                        cache; None for the default.

  retval:               (completionReason, completionMsg)
  """
  # Imported here (not module level) to avoid a circular import.
  from nupic.swarming.ModelRunner import OPFModelRunner

  # The logger for this method
  logger = logging.getLogger('com.numenta.nupic.hypersearch.utils')

  # --------------------------------------------------------------------------
  # Create a temp directory for the experiment and the description files
  experimentDir = tempfile.mkdtemp()
  try:
    logger.info("Using experiment directory: %s" % (experimentDir))

    # Create the decription.py from the overrides in params; keys are emitted
    # in sorted order so the generated file is deterministic.
    paramsFilePath = os.path.join(experimentDir, 'description.py')
    paramsFile = open(paramsFilePath, 'wb')
    paramsFile.write(_paramsFileHead())

    items = params.items()
    items.sort()
    for (key,value) in items:
      quotedKey = _quoteAndEscape(key)
      if isinstance(value, basestring):
        paramsFile.write(" %s : '%s',\n" % (quotedKey , value))
      else:
        paramsFile.write(" %s : %s,\n" % (quotedKey , value))
    paramsFile.write(_paramsFileTail())
    paramsFile.close()

    # Write out the base description
    baseParamsFile = open(os.path.join(experimentDir, 'base.py'), 'wb')
    baseParamsFile.write(baseDescription)
    baseParamsFile.close()

    # Store the experiment's sub-description file into the model table
    # for reference
    fd = open(paramsFilePath)
    expDescription = fd.read()
    fd.close()
    jobsDAO.modelSetFields(modelID, {'genDescription': expDescription})

    # Run the experiment now
    try:
      runner = OPFModelRunner(
        modelID=modelID,
        jobID=jobID,
        predictedField=predictedField,
        experimentDir=experimentDir,
        reportKeyPatterns=reportKeys,
        optimizeKeyPattern=optimizeKey,
        jobsDAO=jobsDAO,
        modelCheckpointGUID=modelCheckpointGUID,
        logLevel=logLevel,
        predictionCacheMaxRecords=predictionCacheMaxRecords)

      # Allow the runner to checkpoint/clean up gracefully on Ctrl-C.
      signal.signal(signal.SIGINT, runner.handleWarningSignal)

      (completionReason, completionMsg) = runner.run()

    except InvalidConnectionException:
      # Usually an orphaned model; let the caller deal with it.
      raise
    except Exception, e:
      (completionReason, completionMsg) = _handleModelRunnerException(jobID,
                                     modelID, jobsDAO, experimentDir, logger, e)

  finally:
    # delete our temporary directory tree
    shutil.rmtree(experimentDir)
    # Restore the default SIGINT handler installed above.
    signal.signal(signal.SIGINT, signal.default_int_handler)

  # Return completion reason and msg
  return (completionReason, completionMsg)
def runDummyModel(modelID, jobID, params, predictedField, reportKeys,
                  optimizeKey, jobsDAO, modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None):
  """ Run a "dummy" model (OPFDummyModelRunner) that reports deterministic
  metric values without running a real OPF experiment; used by swarm tests.

  Parameters:
  -------------------------------------------------------------------------
  modelID:              ID for this model in the models table
  jobID:                ID for this hypersearch job in the jobs table
  params:               dict of dummy-model parameters (see
                        OPFDummyModelRunner for the supported keys)
  predictedField:       Name of the input field being optimized
  reportKeys:           Which metrics to store into the model's results dict
  optimizeKey:          Which metric we are optimizing for
  jobsDAO:              Jobs data access object
  modelCheckpointGUID:  A persistent, globally-unique identifier for
                        constructing the model checkpoint key
  logLevel:             override logging level to this value, if not None
  predictionCacheMaxRecords:
                        Maximum number of records for the prediction output
                        cache; None for the default.

  retval:               (completionReason, completionMsg)
  """
  # Imported here (not module level) to avoid a circular import.
  from nupic.swarming.dummy_model_runner import OPFDummyModelRunner

  # The logger for this method
  logger = logging.getLogger('com.numenta.nupic.hypersearch.utils')

  # Run the experiment now
  try:
    # Some callers pass a bare boolean flag instead of a params dict;
    # normalize that to "all defaults".
    if type(params) is bool:
      params = {}

    runner = OPFDummyModelRunner(modelID=modelID,
                                 jobID=jobID,
                                 params=params,
                                 predictedField=predictedField,
                                 reportKeyPatterns=reportKeys,
                                 optimizeKeyPattern=optimizeKey,
                                 jobsDAO=jobsDAO,
                                 modelCheckpointGUID=modelCheckpointGUID,
                                 logLevel=logLevel,
                                 predictionCacheMaxRecords=predictionCacheMaxRecords)

    (completionReason, completionMsg) = runner.run()

  # The dummy model runner will call sys.exit(1) if
  # NTA_TEST_sysExitFirstNModels is set and the number of models in the
  # models table is <= NTA_TEST_sysExitFirstNModels
  except SystemExit:
    sys.exit(1)
  except InvalidConnectionException:
    raise
  except Exception, e:
    # No experiment dir exists for dummy models, hence "NA".
    (completionReason, completionMsg) = _handleModelRunnerException(jobID,
                                                  modelID, jobsDAO, "NA",
                                                  logger, e)

  # Return completion reason and msg
  return (completionReason, completionMsg)
# Specification of one periodic activity, passed to PeriodicActivityMgr.
#
#   repeating:  True for a recurring activity, False for a one-shot activity
#   period:     number of "ticks" between executions of the activity
#   cb:         zero-argument callable invoked when the period expires
PeriodicActivityRequest = namedtuple(
  "PeriodicActivityRequest",
  ["repeating", "period", "cb"])
class PeriodicActivityMgr(object):
  """ Drives a set of periodic activities off a caller-supplied "tick":
  each call to tick() advances every activity's countdown, and an activity's
  callback fires when its period has elapsed.

  TODO: move to shared script so that we can share it with run_opf_experiment
  """

  # iteratorHolder: a list holding one iterator; we use a list so that we can
  #   replace the iterator for repeating activities (a tuple would not
  #   allow it if the field was an imutable value)
  Activity = namedtuple("Activity", ("repeating",
                                     "period",
                                     "cb",
                                     "iteratorHolder"))

  def __init__(self, requestedActivities):
    """
    requestedActivities: a sequence of PeriodicActivityRequest elements
    """
    self.__activities = []
    for req in requestedActivities:
      # The iterator counts off 'period' ticks; exhausting it (StopIteration
      # in tick()) signals that the activity's period has elapsed.
      act = self.Activity(repeating=req.repeating,
                          period=req.period,
                          cb=req.cb,
                          iteratorHolder=[iter(xrange(req.period))])
      self.__activities.append(act)
    return

  def tick(self):
    """ Activity tick handler; services all activities

    Returns: True if controlling iterator says it's okay to keep going;
             False to stop
    """
    # Run activities whose time has come
    for act in self.__activities:
      # A None holder slot marks an expired one-shot activity; skip it.
      if not act.iteratorHolder[0]:
        continue

      try:
        next(act.iteratorHolder[0])
      except StopIteration:
        # Period elapsed: fire the callback, then re-arm (repeating) or
        # retire (one-shot) the activity.
        act.cb()
        if act.repeating:
          act.iteratorHolder[0] = iter(xrange(act.period))
        else:
          act.iteratorHolder[0] = None

    return True
def generatePersistentJobGUID():
  """Generates a "persistentJobGUID" value.

  The value is globally unique: a fixed "JOB_UUID1-" prefix followed by a
  freshly-generated type-1 UUID.

  Returns: a persistentJobGUID string
  """
  return "JOB_UUID1-%s" % (uuid.uuid1(),)
def identityConversion(value, _keys):
  """Default conversion callback for rCopy(): returns *value* unchanged.

  value:  the value being copied
  _keys:  key path from the root dict to the value (ignored)
  """
  return value
def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True):
  """Recursively copies a dict and returns the result.

  Args:
    d: The dict to copy.
    f: A function to apply to values when copying that takes the value and the
        list of keys from the root of the dict to the value and returns a value
        for the new dict.
    discardNoneKeys: If True, discard key-value pairs when f returns None for
        the value.
    deepCopy: If True, all values in returned dict are true copies (not the
        same object).
  Returns:
    A new dict with keys and values from d replaced with the result of f.
  """
  # Optionally deep copy the dict so that f operates on copies, not originals.
  if deepCopy:
    d = copy.deepcopy(d)

  # Iterative worklist traversal; each entry is
  #   (key, value, targetDict, keyPathToParent)
  # where targetDict is the dict in the NEW structure that should receive
  # this key.
  newDict = {}
  toCopy = [(k, v, newDict, ()) for k, v in d.iteritems()]
  while len(toCopy) > 0:
    # NOTE: 'd' is rebound here to the current TARGET sub-dict; the original
    # parameter is no longer needed past this point.
    k, v, d, prevKeys = toCopy.pop()
    prevKeys = prevKeys + (k,)
    if isinstance(v, dict):
      # Create the corresponding empty sub-dict in the copy and queue the
      # nested items to be copied into it.
      d[k] = dict()
      toCopy[0:0] = [(innerK, innerV, d[k], prevKeys)
                     for innerK, innerV in v.iteritems()]
    else:
      newV = f(v, prevKeys)
      if not discardNoneKeys or newV is not None:
        d[k] = newV
  return newDict
def rApply(d, f):
  """Recursively applies f to the values in dict d.

  Only leaf (non-dict) values are passed to f; nested dicts are traversed.

  Args:
    d: The dict to recurse over.
    f: A function to apply to values in d that takes the value and a list of
        keys from the root of the dict to the value.
  """
  # Iterative traversal: each worklist entry is (dict, keyPathToThatDict).
  remainingDicts = [(d, ())]
  while len(remainingDicts) > 0:
    current, prevKeys = remainingDicts.pop()
    for k, v in current.iteritems():
      keys = prevKeys + (k,)
      if isinstance(v, dict):
        # Queue nested dicts (inserted at the front, popped from the back).
        remainingDicts.insert(0, (v, keys))
      else:
        f(v, keys)
def clippedObj(obj, maxElementSize=64):
  """
  Return a clipped version of obj suitable for printing. This
  is useful when generating log messages by printing data structures, but
  don't want the message to be too long.

  If passed in a dict, list, or namedtuple, each element of the structure's
  string representation will be limited to 'maxElementSize' characters. This
  will return a new object where the string representation of each element
  has been truncated to fit within maxElementSize.

  obj:             the object to clip
  maxElementSize:  maximum number of characters per leaf element before the
                   element is truncated and suffixed with '...'

  FIX: maxElementSize is now propagated into the recursive calls; previously
  nested elements were always clipped at the default of 64 regardless of the
  caller-supplied limit.
  """
  # Is it a named tuple? If so, convert to a dict for uniform handling.
  if hasattr(obj, '_asdict'):
    obj = obj._asdict()

  # Printing a dict?
  if isinstance(obj, dict):
    objOut = dict()
    for key, val in obj.items():
      objOut[key] = clippedObj(val, maxElementSize)

  # Printing a list (or other non-string iterable)?
  # NOTE: strings are excluded explicitly — under Python 3 strings expose
  # __iter__ and would otherwise be exploded into per-character lists.
  elif hasattr(obj, '__iter__') and not isinstance(obj, str):
    objOut = []
    for val in obj:
      objOut.append(clippedObj(val, maxElementSize))

  # Some other object: clip its string representation.
  else:
    objOut = str(obj)
    if len(objOut) > maxElementSize:
      objOut = objOut[0:maxElementSize] + '...'

  return objOut
class ValidationError(validictory.ValidationError):
  """Raised by validate() when a value fails json-schema validation;
  subclasses the third-party validictory error so existing handlers that
  catch validictory.ValidationError continue to work."""
  pass
def validate(value, **kwds):
  """ Validate a python value against json schema:
  validate(value, schemaPath)
  validate(value, schemaDict)

  value:          python object to validate against the schema

  The json schema may be specified either as a path of the file containing
  the json schema or as a python dictionary using one of the
  following keywords as arguments:
    schemaPath:     Path of file containing the json schema object.
    schemaDict:     Python dictionary containing the json schema object

  Any additional keyword arguments are forwarded to validictory.validate.

  Returns: nothing

  Raises:
          ValidationError when value fails json validation
  """

  assert len(kwds.keys()) >= 1
  assert 'schemaPath' in kwds or 'schemaDict' in kwds

  schemaDict = None
  if 'schemaPath' in kwds:
    schemaPath = kwds.pop('schemaPath')
    schemaDict = loadJsonValueFromFile(schemaPath)
  elif 'schemaDict' in kwds:
    schemaDict = kwds.pop('schemaDict')

  # NOTE(review): if BOTH schemaPath and schemaDict are supplied, schemaPath
  # wins and the leftover 'schemaDict' kwarg is forwarded to validictory —
  # presumably an error there; confirm callers never pass both.
  try:
    validictory.validate(value, schemaDict, **kwds)
  except validictory.ValidationError as e:
    # Re-raise as our own subclass so callers can catch this module's type.
    raise ValidationError(e)
def loadJsonValueFromFile(inputFilePath):
  """ Deserialize the json document stored in a file into the corresponding
  python object.

  inputFilePath:  path of the json file

  Returns: python value that represents the loaded json value
  """
  with open(inputFilePath) as fileObj:
    return json.load(fileObj)
def sortedJSONDumpS(obj):
  """
  Return a JSON representation of obj with sorted keys on any embedded dicts.
  This insures that the same object will always be represented by the same
  string even if it contains dicts (where the sort order of the keys is
  normally undefined).

  obj:      a json-serializable python value (dict/list/str/number/None/...)

  Returns:  deterministic JSON string

  FIX: use sorted(obj.items()) instead of obj.items() + list.sort() — the
  latter fails on dict views (Python 3) and sorted() is behaviorally
  identical; also exclude strings from the iterable branch, since they
  expose __iter__ under Python 3.
  """
  itemStrs = []

  if isinstance(obj, dict):
    # Emit dict items in sorted key order so the output is deterministic.
    for key, value in sorted(obj.items()):
      itemStrs.append('%s: %s' % (json.dumps(key), sortedJSONDumpS(value)))
    return '{%s}' % (', '.join(itemStrs))

  elif hasattr(obj, '__iter__') and not isinstance(obj, str):
    for val in obj:
      itemStrs.append(sortedJSONDumpS(val))
    return '[%s]' % (', '.join(itemStrs))

  else:
    return json.dumps(obj)
| 24,450 | Python | .py | 578 | 36.430796 | 108 | 0.662967 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,015 | __init__.py | numenta_nupic-legacy/src/nupic/swarming/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## @package swarming
# The nupic.swarming package contains Python implementations of
# Numenta partical swarming algorithms.
| 1,102 | Python | .py | 23 | 46.869565 | 72 | 0.684601 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,016 | dummy_model_runner.py | numenta_nupic-legacy/src/nupic/swarming/dummy_model_runner.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import itertools
import json
import math
import os
import random
import sys
import time
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf import helpers
from nupic.frameworks.opf.opf_utils import ModelResult
from nupic.swarming import utils
from nupic.swarming.ModelRunner import OPFModelRunner
class OPFDummyModelRunner(OPFModelRunner):
  """ This class runs a 'dummy' OPF Experiment. It will periodically update the
  models db with a deterministic metric value. It can also simulate different
  amounts of computation time
  """

  # Class-level counter used to assign each dummy model a sequential index
  # when one is not supplied via the '__model_num' permutation param.
  modelIndex = 0

  # Built-in deterministic metric functions, selectable by index; each maps
  # the current record index to a metric value.
  metrics = [lambda x: float(x+1),
             lambda x: 100.0 - x-1,
             lambda x: 20.0 * math.sin(x),
             lambda x: (x/9.0)**2]

  # Default values for every supported dummy-model parameter; a copy of this
  # dict is overridden by the 'params' argument to __init__ (see the
  # __init__ docstring for the meaning of each key).
  _DEFAULT_PARAMS = dict(delay= None,
                         finalDelay=None,
                         waitTime=None,
                         randomizeWait=None,
                         iterations=1,
                         metricFunctions=None,
                         metricValue=None,
                         finalize=True,
                         permutationParams={},
                         experimentDirectory=None,
                         makeCheckpoint=False,
                         sysExitModelRange=None,
                         delayModelRange=None,
                         exitAfter=None,
                         errModelRange=None,
                         sleepModelRange=None,
                         jobFailErr=False,
                         )

  # Dummy streamDef; stands in for a real experiment's input stream.
  _DUMMY_STREAMDEF = dict(
    version = 1,
    info = "test_NoProviders",
    streams = [
      dict(source="file://%s" % (os.path.join("extra", "hotgym",
                                              "joined_mosman_2011.csv")),
           info="hotGym.csv",
           columns=["*"],
           #last_record=-1,
           ),
      ],
    aggregation = {
      'hours': 1,
      'fields': [
          ('consumption', 'sum'),
          ('timestamp', 'first'),
          ('TEMP', 'mean'),
          ('DEWP', 'mean'),
          #('SLP', 'mean'),
          #('STP', 'mean'),
          ('MAX', 'mean'),
          ('MIN', 'mean'),
          ('PRCP', 'sum'),
      ],
    },
    )
  def __init__(self,
               modelID,
               jobID,
               params,
               predictedField,
               reportKeyPatterns,
               optimizeKeyPattern,
               jobsDAO,
               modelCheckpointGUID,
               logLevel=None,
               predictionCacheMaxRecords=None):
    """
    Parameters:
    -------------------------------------------------------------------------
    modelID:            ID of this model in the models table

    jobID:              ID of the hypersearch job in the jobs table

    params:             a dictionary of parameters for this dummy model. The
                        possible keys are:

      delay:            OPTIONAL - time (in seconds) to wait before STARTING
                        to process records; useful for simulating workers
                        that start/end at different times.
      finalDelay:       OPTIONAL - time (in seconds) to wait before the
                        finalization operations (best-model check, writing
                        out checkpoints).
      waitTime:         OPTIONAL - time (in seconds) to busy-wait on EACH
                        ITERATION to simulate computation time.
      randomizeWait:    OPTIONAL - [0.0-1.0], default None. If set, waitTime
                        is randomly dithered by +/- randomizeWait fraction
                        of its value (e.g. 0.2 -> +/- 20%).
      iterations:       OPTIONAL - how many iterations to run the model for;
                        -1 means run forever (default=1).
      metricFunctions:  OPTIONAL - a list of single-argument functions,
                        serialized as strings, which return the metric value
                        given the record number. Mutually exclusive with
                        metricValue.
      metricValue:      OPTIONAL - a single value to use for the metric
                        (used to debug hypersearch). Mutually exclusive with
                        metricFunctions.
      finalize:         OPTIONAL - True/False, default True. When False, the
                        model skips recording its metrics and the other
                        usual end-of-run operations.
      permutationParams: dict containing the instances of all the variables
                        being permuted over.
      experimentDirectory: absolute path to a directory with a valid
                        description.py file. Does not affect the metrics
                        produced; needed to create certain objects (such as
                        the output stream).
      makeCheckpoint:   True to actually write a checkpoint out to disk
                        (default: False).
      sysExitModelRange: 'firstIdx,endIdx' string; if this model falls in
                        [firstIdx, endIdx) it calls sys.exit() while
                        running, simulating an orphaned model.
      delayModelRange:  'firstIdx,endIdx' string; if this model falls in
                        [firstIdx, endIdx) it delays 10 sec while running,
                        so another worker may consider it orphaned.
      exitAfter:        the number of iterations after which the model
                        should perform a sys.exit; an alternative way of
                        creating an orphaned model that uses the dummy
                        model's modelIndex instead of the modelID.
      errModelRange:    'firstIdx,endIdx' string; if this model falls in
                        [firstIdx, endIdx) it raises an exception, failing
                        with a CMPL_REASON_ERROR reason.
      sleepModelRange:  'firstIdx,endIdx:delay' string; if this model falls
                        in [firstIdx, endIdx) it sleeps 'delay' seconds at
                        the beginning of the run.
      jobFailErr:       if True, the model raises a JobFailException, which
                        should mark the job as failed and immediately cancel
                        all other workers.

    predictedField:     Name of the input field for which this model is being
                        optimized
    reportKeyPatterns:  list of items from the results dict to include in the
                        report; these can be regular expressions.
    optimizeKeyPattern: Which report item, if any, we will be optimizing for.
                        This can also be a regular expression, but is an
                        error if it matches more than one key from the
                        experiment's results.
    jobsDAO:            Jobs data access object - the interface to the jobs
                        database which has the model's table.
    modelCheckpointGUID: A persistent, globally-unique identifier for
                        constructing the model checkpoint key
    logLevel:           override logging level to this value, if not None
    predictionCacheMaxRecords:
                        Maximum number of records for the prediction output
                        cache. Pass None for the default value.
    """
    # NOTE: experimentDir=None and predictionCacheMaxRecords=None here; the
    # dummy runner manages its own cache size via the attribute below.
    super(OPFDummyModelRunner, self).__init__(modelID=modelID,
                                              jobID=jobID,
                                              predictedField=predictedField,
                                              experimentDir=None,
                                              reportKeyPatterns=reportKeyPatterns,
                                              optimizeKeyPattern=optimizeKeyPattern,
                                              jobsDAO=jobsDAO,
                                              modelCheckpointGUID=modelCheckpointGUID,
                                              logLevel=logLevel,
                                              predictionCacheMaxRecords=None)

    self._predictionCacheMaxRecords = predictionCacheMaxRecords

    # Per-instance copies so dummy-model mutations never touch class state.
    self._streamDef = copy.deepcopy(self._DUMMY_STREAMDEF)
    self._params = copy.deepcopy(self._DEFAULT_PARAMS)

    # -----------------------------------------------------------------------
    # Read the index of the current model in the test; fall back to the
    # class-level auto-incrementing counter when not supplied.
    if 'permutationParams' in params \
        and '__model_num' in params['permutationParams']:
      self.modelIndex=params['permutationParams']['__model_num']
    else:
      self.modelIndex = OPFDummyModelRunner.modelIndex
      OPFDummyModelRunner.modelIndex += 1

    # -----------------------------------------------------------------------
    self._loadDummyModelParameters(params)

    # =========================================================================
    # Load parameters into instance variables
    # =========================================================================
    self._logger.debug("Using Dummy model params: %s", self._params)

    self._busyWaitTime = self._params['waitTime']
    self._iterations = self._params['iterations']
    self._doFinalize = self._params['finalize']
    self._delay = self._params['delay']
    self._sleepModelRange = self._params['sleepModelRange']
    self._makeCheckpoint = self._params['makeCheckpoint']
    self._finalDelay = self._params['finalDelay']
    self._exitAfter = self._params['exitAfter']

    # =========================================================================
    # Randomize Wait time, if necessary
    # =========================================================================
    self.randomizeWait = self._params['randomizeWait']
    if self._busyWaitTime is not None:
      self.__computeWaitTime()

    # =========================================================================
    # Load the appropriate metric value or metric function
    # =========================================================================
    if self._params['metricFunctions'] is not None \
        and self._params['metricValue'] is not None:
      raise RuntimeError("Error, only 1 of 'metricFunctions' or 'metricValue'"\
                         " can be passed to OPFDummyModelRunner params ")

    self.metrics = None
    self.metricValue = None

    if self._params['metricFunctions'] is not None:
      # NOTE: eval of a test-supplied expression; acceptable here because this
      # runner is only used with trusted test parameters.
      self.metrics = eval(self._params['metricFunctions'])
    elif self._params['metricValue'] is not None:
      self.metricValue = float(self._params['metricValue'])
    else:
      self.metrics = OPFDummyModelRunner.metrics[0]

    # =========================================================================
    # Create an OpfExperiment instance, if a directory is specified
    # =========================================================================
    if self._params['experimentDirectory'] is not None:
      self._model = self.__createModel(self._params['experimentDirectory'])
      self.__fieldInfo = self._model.getFieldInfo()

    # =========================================================================
    # Get the sysExit model range
    # =========================================================================
    self._sysExitModelRange = self._params['sysExitModelRange']
    if self._sysExitModelRange is not None:
      self._sysExitModelRange = [int(x) for x in self._sysExitModelRange.split(',')]

    # =========================================================================
    # Get the delay model range
    # =========================================================================
    self._delayModelRange = self._params['delayModelRange']
    if self._delayModelRange is not None:
      self._delayModelRange = [int(x) for x in self._delayModelRange.split(',')]

    # =========================================================================
    # Get the errModel range
    # =========================================================================
    self._errModelRange = self._params['errModelRange']
    if self._errModelRange is not None:
      self._errModelRange = [int(x) for x in self._errModelRange.split(',')]

    self._computModelDelay()

    # Get the jobFailErr boolean
    self._jobFailErr = self._params['jobFailErr']

    self._logger.debug("Dummy Model %d params %r", self._modelID, self._params)
  def _loadDummyModelParameters(self, params):
    """ Loads all the parameters for this dummy model. For any paramters
    specified as lists, read the appropriate value for this model using the model
    index """

    for key, value in params.iteritems():
      if type(value) == list:
        # A list value means "one entry per model": pick ours by model index,
        # wrapping around if there are more models than list entries.
        index = self.modelIndex % len(params[key])
        self._params[key] = params[key][index]
      else:
        self._params[key] = params[key]
  def _computModelDelay(self):
    """ Computes the amount of time (if any) to delay the run of this model.
    This can be determined by two mutually exclusive parameters:
    delay and sleepModelRange.

    'delay' specifies the number of seconds a model should be delayed. If a list
    is specified, the appropriate amount of delay is determined by using the
    model's modelIndex property.

    However, this doesn't work when testing orphaned models, because the
    modelIndex will be the same for every recovery attempt. Therefore, every
    recovery attempt will also be delayed and potentially orphaned.

    'sleepModelRange' doesn't use the modelIndex property for a model, but rather
    sees which order the model is in the database, and uses that to determine
    whether or not a model should be delayed.
    """

    # 'delay' and 'sleepModelRange' are mutually exclusive
    if self._params['delay'] is not None \
          and self._params['sleepModelRange'] is not None:
      raise RuntimeError("Only one of 'delay' or "
                         "'sleepModelRange' may be specified")

    # Get the sleepModel range
    if self._sleepModelRange is not None:
      # Format is 'firstIdx,endIdx:delaySeconds'.
      # NOTE: the local 'range' shadows the builtin within this method.
      range, delay = self._sleepModelRange.split(':')
      delay = float(delay)
      range = map(int, range.split(','))
      modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)
      modelIDs.sort()

      # Clamp the end index to the number of models actually in the job.
      range[1] = min(range[1], len(modelIDs))

      # If the model is in range, add the delay
      if self._modelID in modelIDs[range[0]:range[1]]:
        self._delay = delay
    else:
      self._delay = self._params['delay']
def _getMetrics(self):
""" Protected function that can be overridden by subclasses. Its main purpose
is to allow the the OPFDummyModelRunner to override this with deterministic
values
Returns: All the metrics being computed for this model
"""
metric = None
if self.metrics is not None:
metric = self.metrics(self._currentRecordIndex+1)
elif self.metricValue is not None:
metric = self.metricValue
else:
raise RuntimeError('No metrics or metric value specified for dummy model')
return {self._optimizeKeyPattern:metric}
  def run(self):
    """ Runs the given OPF task against the given Model instance.

    Returns: (completionReason, None) — the completion message is always
             None for the dummy runner.
    """

    self._logger.debug("Starting Dummy Model: modelID=%s;" % (self._modelID))

    # =========================================================================
    # Initialize periodic activities (e.g., for model result updates)
    # =========================================================================
    periodic = self._initPeriodicActivities()

    self._optimizedMetricLabel = self._optimizeKeyPattern
    self._reportMetricLabels = [self._optimizeKeyPattern]

    # =========================================================================
    # Create our top-level loop-control iterator
    # =========================================================================
    # iterations == -1 means run forever (itertools.count is unbounded).
    if self._iterations >= 0:
      iterTracker = iter(xrange(self._iterations))
    else:
      iterTracker = iter(itertools.count())

    # =========================================================================
    # This gets set in the unit tests. It tells the worker to sys exit
    # the first N models. This is how we generate orphaned models
    doSysExit = False
    if self._sysExitModelRange is not None:
      modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
      modelIDs = [x[0] for x in modelAndCounters]
      modelIDs.sort()
      (beg,end) = self._sysExitModelRange
      if self._modelID in modelIDs[int(beg):int(end)]:
        doSysExit = True

    if self._delayModelRange is not None:
      modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
      modelIDs = [x[0] for x in modelAndCounters]
      modelIDs.sort()
      (beg,end) = self._delayModelRange
      if self._modelID in modelIDs[int(beg):int(end)]:
        time.sleep(10)

      # DEBUG!!!! infinite wait if we have 50 models
      #if len(modelIDs) >= 50:
      #  jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
      #  while not jobCancel:
      #    time.sleep(1)
      #    jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]

    if self._errModelRange is not None:
      modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
      modelIDs = [x[0] for x in modelAndCounters]
      modelIDs.sort()
      (beg,end) = self._errModelRange
      if self._modelID in modelIDs[int(beg):int(end)]:
        raise RuntimeError("Exiting with error due to errModelRange parameter")

    # =========================================================================
    # Delay, if necessary
    if self._delay is not None:
      time.sleep(self._delay)

    # =========================================================================
    # Run it!
    # =========================================================================
    self._currentRecordIndex = 0
    while True:

      # =========================================================================
      # Check if the model should be stopped
      # =========================================================================

      # If killed by a terminator, stop running
      if self._isKilled:
        break

      # If job stops or hypersearch ends, stop running
      if self._isCanceled:
        break

      # If model is mature, stop running ONLY IF we are not the best model
      # for the job. Otherwise, keep running so we can keep returning
      # predictions to the user
      if self._isMature:
        if not self._isBestModel:
          self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
          break
        else:
          self._cmpReason = self._jobsDAO.CMPL_REASON_EOF

      # =========================================================================
      # Get the the next record, and "write it"
      # =========================================================================
      try:
        self._currentRecordIndex = next(iterTracker)
      except StopIteration:
        break

      # "Write" a dummy output value. This is used to test that the batched
      # writing works properly
      self._writePrediction(ModelResult(None, None, None, None))

      periodic.tick()

      # =========================================================================
      # Compute wait times. See if model should exit
      # =========================================================================
      if self.__shouldSysExit(self._currentRecordIndex):
        sys.exit(1)

      # Simulate computation time
      if self._busyWaitTime is not None:
        time.sleep(self._busyWaitTime)
        # Re-dither the wait for the next iteration, if randomizeWait is set.
        self.__computeWaitTime()

      # Asked to abort after so many iterations?
      if doSysExit:
        sys.exit(1)

      # Asked to raise a jobFailException?
      if self._jobFailErr:
        raise utils.JobFailException("E10000",
                                     "dummyModel's jobFailErr was True.")

    # =========================================================================
    # Handle final operations
    # =========================================================================
    if self._doFinalize:
      if not self._makeCheckpoint:
        self._model = None

      # Delay finalization operation
      if self._finalDelay is not None:
        time.sleep(self._finalDelay)

      self._finalize()

    self._logger.info("Finished: modelID=%r "% (self._modelID))

    return (self._cmpReason, None)
def __computeWaitTime(self):
if self.randomizeWait is not None:
self._busyWaitTime = random.uniform((1.0-self.randomizeWait) * self._busyWaitTime,
(1.0+self.randomizeWait) * self._busyWaitTime)
  def __createModel(self, expDir):
    """ Build an OPF model instance from the experiment directory's
    description.py.

    expDir:   absolute path of a directory containing a valid description.py

    Returns:  the model created by ModelFactory
    """
    # -----------------------------------------------------------------------
    # Load the experiment's description.py module
    descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
      expDir)
    expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)

    # -----------------------------------------------------------------------
    # Construct the model instance
    modelDescription = expIface.getModelDescription()
    return ModelFactory.create(modelDescription)
def _createPredictionLogger(self):
"""
Creates the model's PredictionLogger object, which is an interface to write
model results to a permanent storage location
"""
class DummyLogger:
def writeRecord(self, record): pass
def writeRecords(self, records, progressCB): pass
def close(self): pass
self._predictionLogger = DummyLogger()
def __shouldSysExit(self, iteration):
    """
    Checks to see if the model should exit based on the exitAfter dummy
    parameter.

    Only ONE of the models sharing this model's '__model_num' is allowed to
    exit: the one with the smallest model ID. This keeps multiple clones of
    the same dummy model from all calling sys.exit().

    Parameters:
    ---------------------------------------------------------------------
    iteration:  current record index of this model run
    retval:     True if this instance should exit, False otherwise
    """
    # Not configured to exit, or haven't reached the configured iteration yet.
    if self._exitAfter is None \
          or iteration < self._exitAfter:
      return False
    # Fetch the params of every model in this job.
    # NOTE(review): each row appears to be (modelID, [paramsJSON, ...]), so
    # e[1][0] is the params JSON string — confirm against
    # ClientJobsDAO.modelsGetFieldsForJob().
    results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])
    modelIDs = [e[0] for e in results]
    modelNums = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in results]
    # Keep only the models that share our model number.
    # NOTE: Python 2 semantics — filter() and zip() return lists here, which
    # the zip(*...)[0] indexing below relies on.
    sameModelNumbers = filter(lambda x: x[1] == self.modelIndex,
                              zip(modelIDs, modelNums))
    # The instance with the lowest model ID is the designated one to exit.
    firstModelID = min(zip(*sameModelNumbers)[0])
    return firstModelID == self._modelID
| 25,423 | Python | .py | 492 | 39.359756 | 89 | 0.530789 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,017 | hypersearch_worker.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch_worker.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import pprint
from optparse import OptionParser
import random
import logging
import json
import hashlib
import itertools
import StringIO
import traceback
from nupic.support import initLogging
from nupic.support.configuration import Configuration
from nupic.swarming.hypersearch.extended_logger import ExtendedLogger
from nupic.swarming.hypersearch.error_codes import ErrorCodes
from nupic.swarming.utils import clippedObj, validate
from nupic.database.client_jobs_dao import ClientJobsDAO
from hypersearch_v2 import HypersearchV2
class HypersearchWorker(object):
  """ The HypersearchWorker is responsible for evaluating one or more models
  within a specific Hypersearch job.

  One or more instances of this object are launched by the engine, each in a
  separate process. When running within Hadoop, each instance is run within a
  separate Hadoop Map Task. Each instance gets passed the parameters of the
  hypersearch via a reference to a search job request record in a "jobs" table
  within a database.

  From there, each instance will try different models, based on the search
  parameters and share its results via a "models" table within the same
  database.

  The general flow for each worker is this:
  while more models to evaluate:
    pick a model based on information about the job and the other models that
      have already been evaluated.
    mark the model as "in progress" in the "models" table.
    evaluate the model, storing metrics on performance periodically back to
      the model's entry in the "models" table.
    mark the model as "completed" in the "models" table
  """

  def __init__(self, options, cmdLineArgs):
    """ Instantiate the Hypersearch worker.

    Parameters:
    ---------------------------------------------------------------------
    options:      The command line options. See the main() method for a
                    description of these options
    cmdLineArgs:  Copy of the command line arguments, so we can place them
                    in the log
    """

    # Save options
    self._options = options

    # Instantiate our logger
    self.logger = logging.getLogger(".".join(
        ['com.numenta.nupic.swarming', self.__class__.__name__]))

    # Override log level?
    if options.logLevel is not None:
      self.logger.setLevel(options.logLevel)

    self.logger.info("Launched with command line arguments: %s" %
                      str(cmdLineArgs))
    self.logger.debug("Env variables: %s" % (pprint.pformat(os.environ)))
    #self.logger.debug("Value of nupic.hypersearch.modelOrphanIntervalSecs: %s" \
    #          % Configuration.get('nupic.hypersearch.modelOrphanIntervalSecs'))

    # Init random seed (fixed seed so worker behavior is reproducible)
    random.seed(42)

    # This will hold an instance of a Hypersearch class which handles
    # the logic of which models to create/evaluate.
    self._hs = None

    # -------------------------------------------------------------------------
    # These elements form a cache of the update counters we last received for
    # the all models in the database. It is used to determine which models we
    # have to notify the Hypersearch object that the results have changed.
    # This is a dict of modelID -> updateCounter
    self._modelIDCtrDict = dict()
    # This is the above is a list of tuples: (modelID, updateCounter)
    self._modelIDCtrList = []
    # This is just the set of modelIDs (keys)
    self._modelIDSet = set()

    # This will be filled in by run()
    self._workerID = None
def _processUpdatedModels(self, cjDAO):
    """ For all models that modified their results since last time this method
    was called, send their latest results to the Hypersearch implementation.

    Keeps self._modelIDCtrDict / self._modelIDCtrList / self._modelIDSet in
    sync with the database's per-model update counters, and forwards both
    changed and newly-seen models to self._hs.recordModelProgress().

    Parameters:
    ---------------------------------------------------------------------
    cjDAO:   ClientJobsDAO instance used to query the models table
    """

    # Get the latest update counters. This returns a list of tuples:
    #  (modelID, updateCounter)
    curModelIDCtrList = cjDAO.modelsGetUpdateCounters(self._options.jobID)
    if len(curModelIDCtrList) == 0:
      return

    self.logger.debug("current modelID/updateCounters: %s" \
                      % (str(curModelIDCtrList)))
    self.logger.debug("last modelID/updateCounters: %s" \
                      % (str(self._modelIDCtrList)))

    # --------------------------------------------------------------------
    # Find out which ones have changed update counters. Since these are models
    # that the Hypersearch implementation already knows about, we don't need to
    # send params or paramsHash.
    # NOTE: both lists are kept sorted by modelID so that positions line up
    # in the izip() below.
    curModelIDCtrList = sorted(curModelIDCtrList)
    numItems = len(curModelIDCtrList)

    # Each item in the list we are filtering contains:
    #  (idxIntoModelIDCtrList, (modelID, curCtr), (modelID, oldCtr))
    # We only want to keep the ones where the oldCtr != curCtr
    # NOTE: Python 2 — filter() returns a list here, so len() below is valid.
    changedEntries = filter(lambda x:x[1][1] != x[2][1],
                      itertools.izip(xrange(numItems), curModelIDCtrList,
                                     self._modelIDCtrList))

    if len(changedEntries) > 0:
      # Update values in our cache
      self.logger.debug("changedEntries: %s", str(changedEntries))
      for entry in changedEntries:
        (idx, (modelID, curCtr), (_, oldCtr)) = entry
        self._modelIDCtrDict[modelID] = curCtr
        assert (self._modelIDCtrList[idx][0] == modelID)
        assert (curCtr != oldCtr)
        self._modelIDCtrList[idx][1] = curCtr

      # Tell Hypersearch implementation of the updated results for each model
      changedModelIDs = [x[1][0] for x in changedEntries]
      modelResults = cjDAO.modelsGetResultAndStatus(changedModelIDs)
      for mResult in modelResults:
        results = mResult.results
        if results is not None:
          results = json.loads(results)
        self._hs.recordModelProgress(modelID=mResult.modelId,
                     modelParams = None,
                     modelParamsHash = mResult.engParamsHash,
                     results = results,
                     completed = (mResult.status == cjDAO.STATUS_COMPLETED),
                     completionReason = mResult.completionReason,
                     matured = mResult.engMatured,
                     numRecords = mResult.numRecords)

    # --------------------------------------------------------------------
    # Figure out which ones are newly arrived and add them to our
    # cache
    curModelIDSet = set([x[0] for x in curModelIDCtrList])
    newModelIDs = curModelIDSet.difference(self._modelIDSet)
    if len(newModelIDs) > 0:

      # Add new modelID and counters to our cache
      self._modelIDSet.update(newModelIDs)
      curModelIDCtrDict = dict(curModelIDCtrList)

      # Get the results for each of these models and send them to the
      # Hypersearch implementation.
      # Both result sets are sorted so corresponding entries pair up in the
      # izip() below; the assert inside the loop double-checks the pairing.
      modelInfos = cjDAO.modelsGetResultAndStatus(newModelIDs)
      modelInfos.sort()
      modelParamsAndHashs = cjDAO.modelsGetParams(newModelIDs)
      modelParamsAndHashs.sort()

      for (mResult, mParamsAndHash) in itertools.izip(modelInfos,
                                              modelParamsAndHashs):
        modelID = mResult.modelId
        assert (modelID == mParamsAndHash.modelId)

        # Update our cache of IDs and update counters
        self._modelIDCtrDict[modelID] = curModelIDCtrDict[modelID]
        self._modelIDCtrList.append([modelID, curModelIDCtrDict[modelID]])

        # Tell the Hypersearch implementation of the new model
        results = mResult.results
        if results is not None:
          results = json.loads(mResult.results)

        self._hs.recordModelProgress(modelID = modelID,
                     modelParams = json.loads(mParamsAndHash.params),
                     modelParamsHash = mParamsAndHash.engParamsHash,
                     results = results,
                     completed = (mResult.status == cjDAO.STATUS_COMPLETED),
                     completionReason = (mResult.completionReason),
                     matured = mResult.engMatured,
                     numRecords = mResult.numRecords)

      # Keep our list sorted
      self._modelIDCtrList.sort()
def run(self):
    """ Run this worker.

    Loops picking up models from the Hypersearch implementation (or a single
    model given via --modelID), runs each one, and reports status back to the
    jobs database until the search says it is done.

    Parameters:
    ----------------------------------------------------------------------
    retval:     jobID of the job we ran. This is used by unit test code
                  when calling this working using the --params command
                  line option (which tells this worker to insert the job
                  itself).
    """
    # Easier access to options
    options = self._options

    # ---------------------------------------------------------------------
    # Connect to the jobs database
    self.logger.info("Connecting to the jobs database")
    cjDAO = ClientJobsDAO.get()

    # Get our worker ID
    self._workerID = cjDAO.getConnectionID()

    if options.clearModels:
      cjDAO.modelsClearAll()

    # -------------------------------------------------------------------------
    # if params were specified on the command line, insert a new job using
    #  them.
    if options.params is not None:
      options.jobID = cjDAO.jobInsert(client='hwTest', cmdLine="echo 'test mode'",
              params=options.params, alreadyRunning=True,
              minimumWorkers=1, maximumWorkers=1,
              jobType = cjDAO.JOB_TYPE_HS)
    if options.workerID is not None:
      wID = options.workerID
    else:
      wID = self._workerID

    buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
    logPrefix = '<BUILDID=%s, WORKER=HW, WRKID=%s, JOBID=%s> ' % \
                (buildID, wID, options.jobID)
    ExtendedLogger.setLogPrefix(logPrefix)

    # ---------------------------------------------------------------------
    # Get the search parameters
    # If asked to reset the job status, do that now
    if options.resetJobStatus:
      cjDAO.jobSetFields(options.jobID,
          fields={'workerCompletionReason': ClientJobsDAO.CMPL_REASON_SUCCESS,
                  'cancel': False,
                  #'engWorkerState': None
                  },
          useConnectionID=False,
          ignoreUnchanged=True)
    jobInfo = cjDAO.jobInfo(options.jobID)
    self.logger.info("Job info retrieved: %s" % (str(clippedObj(jobInfo))))

    # ---------------------------------------------------------------------
    # Instantiate the Hypersearch object, which will handle the logic of
    #  which models to create when we need more to evaluate.
    jobParams = json.loads(jobInfo.params)

    # Validate job params
    jsonSchemaPath = os.path.join(os.path.dirname(__file__),
                                  "jsonschema",
                                  "jobParamsSchema.json")
    validate(jobParams, schemaPath=jsonSchemaPath)

    hsVersion = jobParams.get('hsVersion', None)
    if hsVersion == 'v2':
      self._hs = HypersearchV2(searchParams=jobParams, workerID=self._workerID,
              cjDAO=cjDAO, jobID=options.jobID, logLevel=options.logLevel)
    else:
      raise RuntimeError("Invalid Hypersearch implementation (%s) specified" \
                          % (hsVersion))

    # =====================================================================
    # The main loop.
    # NOTE: "reporter:..." lines printed to stderr below are Hadoop streaming
    # status/counter updates (Python 2 `print >>` syntax).
    try:
      exit = False
      numModelsTotal = 0
      print >>sys.stderr, "reporter:status:Evaluating first model..."
      while not exit:

        # ------------------------------------------------------------------
        # Choose a model to evaluate
        batchSize = 10              # How many to try at a time.
        modelIDToRun = None
        while modelIDToRun is None:

          if options.modelID is None:
            # -----------------------------------------------------------------
            # Get the latest results on all running models and send them to
            #  the Hypersearch implementation
            # This calls cjDAO.modelsGetUpdateCounters(), compares the
            # updateCounters with what we have cached, fetches the results for the
            # changed and new models, and sends those to the Hypersearch
            # implementation's self._hs.recordModelProgress() method.
            self._processUpdatedModels(cjDAO)

            # --------------------------------------------------------------------
            # Create a new batch of models
            (exit, newModels) = self._hs.createModels(numModels = batchSize)
            if exit:
              break

            # No more models left to create, just loop. The _hs is waiting for
            #   all remaining running models to complete, and may pick up on an
            #  orphan if it detects one.
            if len(newModels) == 0:
              continue

            # Try and insert one that we will run
            for (modelParams, modelParamsHash, particleHash) in newModels:
              jsonModelParams = json.dumps(modelParams)
              (modelID, ours) = cjDAO.modelInsertAndStart(options.jobID,
                                  jsonModelParams, modelParamsHash, particleHash)

              # Some other worker is already running it, tell the Hypersearch object
              #  so that it doesn't try and insert it again
              if not ours:
                mParamsAndHash = cjDAO.modelsGetParams([modelID])[0]
                mResult = cjDAO.modelsGetResultAndStatus([modelID])[0]
                results = mResult.results
                if results is not None:
                  results = json.loads(results)

                modelParams = json.loads(mParamsAndHash.params)
                particleHash = cjDAO.modelsGetFields(modelID,
                                  ['engParticleHash'])[0]
                particleInst = "%s.%s" % (
                          modelParams['particleState']['id'],
                          modelParams['particleState']['genIdx'])
                self.logger.info("Adding model %d to our internal DB " \
                      "because modelInsertAndStart() failed to insert it: " \
                      "paramsHash=%s, particleHash=%s, particleId='%s'", modelID,
                      mParamsAndHash.engParamsHash.encode('hex'),
                      particleHash.encode('hex'), particleInst)
                self._hs.recordModelProgress(modelID = modelID,
                      modelParams = modelParams,
                      modelParamsHash = mParamsAndHash.engParamsHash,
                      results = results,
                      completed = (mResult.status == cjDAO.STATUS_COMPLETED),
                      completionReason = mResult.completionReason,
                      matured = mResult.engMatured,
                      numRecords = mResult.numRecords)
              else:
                modelIDToRun = modelID
                break

          else:
            # A specific modelID was passed on the command line
            modelIDToRun = int(options.modelID)
            mParamsAndHash = cjDAO.modelsGetParams([modelIDToRun])[0]
            modelParams = json.loads(mParamsAndHash.params)
            modelParamsHash = mParamsAndHash.engParamsHash

            # Make us the worker
            cjDAO.modelSetFields(modelIDToRun,
                                 dict(engWorkerConnId=self._workerID))
            # Dead code path kept for reference: re-inserting an orphaned
            # model under a new hash. Never executed (guarded by `if False`).
            if False:
              # Change the hash and params of the old entry so that we can
              #  create a new model with the same params
              for attempt in range(1000):
                paramsHash = hashlib.md5("OrphanParams.%d.%d" % (modelIDToRun,
                                                                 attempt)).digest()
                particleHash = hashlib.md5("OrphanParticle.%d.%d" % (modelIDToRun,
                                                                  attempt)).digest()
                try:
                  cjDAO.modelSetFields(modelIDToRun,
                                           dict(engParamsHash=paramsHash,
                                                engParticleHash=particleHash))
                  success = True
                except:
                  success = False
                if success:
                  break
              if not success:
                raise RuntimeError("Unexpected failure to change paramsHash and "
                                   "particleHash of orphaned model")

              (modelIDToRun, ours) = cjDAO.modelInsertAndStart(options.jobID,
                                  mParamsAndHash.params, modelParamsHash)

        # ^^^ end while modelIDToRun ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

        # ---------------------------------------------------------------
        # We have a model, evaluate it now
        # All done?
        if exit:
          break

        # Run the model now
        self.logger.info("RUNNING MODEL GID=%d, paramsHash=%s, params=%s",
              modelIDToRun, modelParamsHash.encode('hex'), modelParams)

        # ---------------------------------------------------------------------
        # Construct model checkpoint GUID for this model:
        # jobParams['persistentJobGUID'] contains the client's (e.g., API Server)
        # persistent, globally-unique model identifier, which is what we need;
        persistentJobGUID = jobParams['persistentJobGUID']
        assert persistentJobGUID, "persistentJobGUID: %r" % (persistentJobGUID,)

        modelCheckpointGUID = jobInfo.client + "_" + persistentJobGUID + (
          '_' + str(modelIDToRun))

        self._hs.runModel(modelID=modelIDToRun, jobID = options.jobID,
                          modelParams=modelParams, modelParamsHash=modelParamsHash,
                          jobsDAO=cjDAO, modelCheckpointGUID=modelCheckpointGUID)

        # TODO: don't increment for orphaned models
        numModelsTotal += 1

        self.logger.info("COMPLETED MODEL GID=%d; EVALUATED %d MODELs",
              modelIDToRun, numModelsTotal)
        print >>sys.stderr, "reporter:status:Evaluated %d models..." % \
                                              (numModelsTotal)
        print >>sys.stderr, "reporter:counter:HypersearchWorker,numModels,1"

        if options.modelID is not None:
          exit = True
        # ^^^ end while not exit

    finally:
      # Provide Hypersearch instance an opportunity to clean up temporary files
      self._hs.close()

    self.logger.info("FINISHED. Evaluated %d models." % (numModelsTotal))
    print >>sys.stderr, "reporter:status:Finished, evaluated %d models" % (numModelsTotal)
    return options.jobID
# Usage text handed to the OptionParser in main(); optparse substitutes the
# program name for %prog.
helpString = \
"""%prog [options]
This script runs as a Hypersearch worker process. It loops, looking for and
evaluating prospective models from a Hypersearch database.
"""
def main(argv):
  """
  The main function of the HypersearchWorker script. This parses the command
  line arguments, instantiates a HypersearchWorker instance, and then
  runs it.

  Exactly one of --jobID (join an existing job) or --params (insert and run a
  new job, used by unit tests) must be given.

  Parameters:
  ----------------------------------------------------------------------
  argv:       full argv; argv[0] is ignored, the rest are parsed as options
  retval:     jobID of the job we ran. This is used by unit test code
                when calling this working using the --params command
                line option (which tells this worker to insert the job
                itself).
  """
  parser = OptionParser(helpString)

  parser.add_option("--jobID", action="store", type="int", default=None,
        help="jobID of the job within the dbTable [default: %default].")

  parser.add_option("--modelID", action="store", type="str", default=None,
        help=("Tell worker to re-run this model ID. When specified, jobID "
         "must also be specified [default: %default]."))

  parser.add_option("--workerID", action="store", type="str", default=None,
        help=("workerID of the scheduler's SlotAgent (GenericWorker) that "
          "hosts this SpecializedWorker [default: %default]."))

  parser.add_option("--params", action="store", default=None,
        help="Create and execute a new hypersearch request using this JSON " \
        "format params string. This is helpful for unit tests and debugging. " \
        "When specified jobID must NOT be specified. [default: %default].")

  parser.add_option("--clearModels", action="store_true", default=False,
        help="clear out the models table before starting [default: %default].")

  parser.add_option("--resetJobStatus", action="store_true", default=False,
        help="Reset the job status before starting [default: %default].")

  parser.add_option("--logLevel", action="store", type="int", default=None,
        help="override default log level. Pass in an integer value that "
        "represents the desired logging level (10=logging.DEBUG, "
        "20=logging.INFO, etc.) [default: %default].")

  # Evaluate command line arguments
  (options, args) = parser.parse_args(argv[1:])
  if len(args) != 0:
    raise RuntimeError("Expected no command line arguments, but got: %s" % \
                        (args))

  if (options.jobID and options.params):
    raise RuntimeError("--jobID and --params can not be used at the same time")

  if (options.jobID is None and options.params is None):
    raise RuntimeError("Either --jobID or --params must be specified.")

  initLogging(verbose=True)

  # Instantiate the HypersearchWorker and run it
  hst = HypersearchWorker(options, argv[1:])

  # Normal use. This is one of among a number of workers. If we encounter
  #  an exception at the outer loop here, we fail the entire job.
  if options.params is None:
    try:
      jobID = hst.run()

    except Exception, e:
      jobID = options.jobID
      msg = StringIO.StringIO()
      print >>msg, "%s: Exception occurred in Hypersearch Worker: %r" % \
              (ErrorCodes.hypersearchLogicErr, e)
      traceback.print_exc(None, msg)

      completionReason = ClientJobsDAO.CMPL_REASON_ERROR
      completionMsg = msg.getvalue()
      hst.logger.error(completionMsg)

      # If no other worker already marked the job as failed, do so now.
      # NOTE: the exception is deliberately NOT re-raised here — the job is
      # marked failed in the database instead, and the worker exits normally.
      jobsDAO = ClientJobsDAO.get()
      workerCmpReason = jobsDAO.jobGetFields(options.jobID,
          ['workerCompletionReason'])[0]
      if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
        jobsDAO.jobSetFields(options.jobID, fields=dict(
            cancel=True,
            workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
            workerCompletionMsg = completionMsg),
            useConnectionID=False,
            ignoreUnchanged=True)

  # Run just 1 worker for the entire job. Used for unit tests that run in
  # 1 process
  else:
    jobID = None
    completionReason = ClientJobsDAO.CMPL_REASON_SUCCESS
    completionMsg = "Success"

    try:
      jobID = hst.run()
    except Exception, e:
      jobID = hst._options.jobID
      completionReason = ClientJobsDAO.CMPL_REASON_ERROR
      completionMsg = "ERROR: %s" % (e,)
      raise
    finally:
      # Always record the final job status, even when re-raising.
      if jobID is not None:
        cjDAO = ClientJobsDAO.get()
        cjDAO.jobSetCompleted(jobID=jobID,
                              completionReason=completionReason,
                              completionMsg=completionMsg)

  return jobID
# Script entry point: install the prefixing logger class, tag all log lines
# with build/worker/job identifiers, then hand off to main().
if __name__ == "__main__":
  logging.setLoggerClass(ExtendedLogger)
  buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
  # Job/worker IDs are unknown until main() runs; run() re-sets the prefix
  # with real values once it has them.
  logPrefix = '<BUILDID=%s, WORKER=HS, WRKID=N/A, JOBID=N/A> ' % buildID
  ExtendedLogger.setLogPrefix(logPrefix)

  try:
    main(sys.argv)
  except:
    # Log any unhandled exception (including SystemExit) before re-raising so
    # the failure reaches the worker's log file.
    logging.exception("HypersearchWorker is exiting with unhandled exception; "
                      "argv=%r", sys.argv)
    raise
| 24,271 | Python | .py | 485 | 40.245361 | 90 | 0.611621 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,018 | hypersearch_v2.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch_v2.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import logging
import json
import hashlib
import itertools
import StringIO
import shutil
import tempfile
import copy
import pprint
from nupic.swarming.hypersearch.permutation_helpers import *
from nupic.swarming.hypersearch.particle import Particle
from nupic.swarming.hypersearch.error_codes import ErrorCodes
from nupic.swarming.hypersearch.swarm_terminator import SwarmTerminator
from nupic.swarming.hypersearch.hs_state import HsState, HsSearchType
from nupic.frameworks.opf import helpers
from nupic.swarming.experiment_utils import InferenceType
from nupic.swarming.utils import sortedJSONDumpS, rApply, rCopy
from nupic.swarming.utils import clippedObj
from nupic.swarming.utils import (runModelGivenBaseAndParams, runDummyModel)
from nupic.database.client_jobs_dao import (
ClientJobsDAO, InvalidConnectionException)
from nupic.swarming.exp_generator.experiment_generator import expGenerator
def _flattenKeys(keys):
  """Collapse a sequence of key strings into a single '|'-delimited string."""
  return '|'.join(keys)
class ResultsDB(object):
  """This class holds all the information we have accumulated on completed
  models, which particles were used, etc.

  When we get updated results sent to us (via recordModelProgress), we
  record it here for access later by various functions in this module.
  """

  def __init__(self, hsObj):
    """ Instantiate our results database

    Parameters:
    --------------------------------------------------------------------
    hsObj:        Reference to the HypersearchV2 instance
    """
    self._hsObj = hsObj

    # This list holds all the results we have so far on every model. In
    #  addition, we maintain mutliple other data structures which provide
    #  faster access into portions of this list
    self._allResults = []

    # Models that completed with errors and all completed.
    # These are used to determine when we should abort because of too many
    #   errors
    self._errModels = set()
    self._numErrModels = 0
    self._completedModels = set()
    self._numCompletedModels = 0

    # Map of the model ID to index of result in _allResults
    self._modelIDToIdx = dict()

    # The global best result on the optimize metric so far, and the model ID
    # (lower errScore is always better; see update()).
    self._bestResult = numpy.inf
    self._bestModelID = None

    # This is a dict of dicts. The top level dict has the swarmId as the key.
    # Each entry is a dict of genIdx: (modelId, errScore) entries.
    self._swarmBestOverall = dict()

    # For each swarm, we keep track of how many particles we have per generation
    # The key is the swarmId, the value is a list of the number of particles
    # at each generation
    self._swarmNumParticlesPerGeneration = dict()

    # The following variables are used to support the
    #  getMaturedSwarmGenerations() call.
    #
    # The _modifiedSwarmGens set contains the set of (swarmId, genIdx) tuples
    #  that have had results reported to them since the last time
    #  getMaturedSwarmGenerations() was called.
    #
    # The maturedSwarmGens contains (swarmId,genIdx) tuples, one for each
    #  swarm generation index which we have already detected has matured. This
    #  insures that if by chance we get a rogue report from a model in a swarm
    #  generation index which we have already assumed was matured that we won't
    #  report on it again.
    self._modifiedSwarmGens = set()
    self._maturedSwarmGens = set()

    # For each particle, we keep track of it's best score (across all
    # generations) and the position it was at when it got that score. The keys
    # in this dict are the particleId, the values are (bestResult, position),
    # where position is a dict with varName:position items in it.
    self._particleBest = dict()

    # For each particle, we keep track of it's latest generation index.
    self._particleLatestGenIdx = dict()

    # For each swarm, we keep track of which models are in it. The key
    # is the swarmId, the value is a list of indexes into self._allResults.
    self._swarmIdToIndexes = dict()

    # ParamsHash to index mapping
    self._paramsHashToIndexes = dict()
def update(self, modelID, modelParams, modelParamsHash, metricResult,
             completed, completionReason, matured, numRecords):
    """ Insert a new entry or update an existing one. If this is an update
    of an existing entry, then modelParams will be None

    Parameters:
    --------------------------------------------------------------------
    modelID:       globally unique modelID of this model
    modelParams:    params dict for this model, or None if this is just an update
                    of a model that it already previously reported on.

                    See the comments for the createModels() method for
                    a description of this dict.

    modelParamsHash:  hash of the modelParams dict, generated by the worker
                    that put it into the model database.
    metricResult:   value on the optimizeMetric for this model.
                    May be None if we have no results yet.
    completed:      True if the model has completed evaluation, False if it
                    is still running (and these are online results)
    completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
    matured:        True if this model has matured
    numRecords:     Number of records that have been processed so far by this
                    model.

    retval: Canonicalized result on the optimize metric
    """
    # The modelParamsHash must always be provided - it can change after a
    #  model is inserted into the models table if it got detected as an
    #  orphan
    assert (modelParamsHash is not None)

    # We consider a model metricResult as "final" if it has completed or
    #  matured. By default, assume anything that has completed has matured
    if completed:
      matured = True

    # Get the canonicalized optimize metric results. For this metric, lower
    #  is always better
    if metricResult is not None and matured and \
                   completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
                                        ClientJobsDAO.CMPL_REASON_STOPPED]:
      # Canonicalize the error score so that lower is better
      if self._hsObj._maximize:
        errScore = -1 * metricResult
      else:
        errScore = metricResult

      if errScore < self._bestResult:
        self._bestResult = errScore
        self._bestModelID = modelID
        self._hsObj.logger.info("New best model after %d evaluations: errScore "
              "%g on model %s" % (len(self._allResults), self._bestResult,
                                  self._bestModelID))

    else:
      errScore = numpy.inf

    # If this model completed with an unacceptable completion reason, set the
    #  errScore to infinite and essentially make this model invisible to
    #  further queries
    if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
      errScore = numpy.inf
      hidden = True
    else:
      hidden = False

    # Update our set of erred models and completed models. These are used
    #  to determine if we should abort the search because of too many errors
    if completed:
      self._completedModels.add(modelID)
      self._numCompletedModels = len(self._completedModels)
      if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
        self._errModels.add(modelID)
        self._numErrModels = len(self._errModels)

    # Are we creating a new entry?
    wasHidden = False
    if modelID not in self._modelIDToIdx:
      assert (modelParams is not None)
      entry = dict(modelID=modelID, modelParams=modelParams,
                   modelParamsHash=modelParamsHash,
                   errScore=errScore, completed=completed,
                   matured=matured, numRecords=numRecords, hidden=hidden)
      self._allResults.append(entry)
      entryIdx = len(self._allResults) - 1
      self._modelIDToIdx[modelID] = entryIdx

      self._paramsHashToIndexes[modelParamsHash] = entryIdx

      swarmId = modelParams['particleState']['swarmId']
      if not hidden:
        # Update the list of particles in each swarm
        if swarmId in self._swarmIdToIndexes:
          self._swarmIdToIndexes[swarmId].append(entryIdx)
        else:
          self._swarmIdToIndexes[swarmId] = [entryIdx]

        # Update number of particles at each generation in this swarm
        genIdx = modelParams['particleState']['genIdx']
        numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
        while genIdx >= len(numPsEntry):
          numPsEntry.append(0)
        numPsEntry[genIdx] += 1
        self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry

    # Replacing an existing one
    else:
      entryIdx = self._modelIDToIdx.get(modelID, None)
      assert (entryIdx is not None)
      entry = self._allResults[entryIdx]
      wasHidden = entry['hidden']

      # If the paramsHash changed, note that. This can happen for orphaned
      #  models
      if entry['modelParamsHash'] != modelParamsHash:
        self._paramsHashToIndexes.pop(entry['modelParamsHash'])
        self._paramsHashToIndexes[modelParamsHash] = entryIdx
        entry['modelParamsHash'] = modelParamsHash

      # Get the model params, swarmId, and genIdx
      # (the update call itself passed modelParams=None; use the cached copy)
      modelParams = entry['modelParams']
      swarmId = modelParams['particleState']['swarmId']
      genIdx = modelParams['particleState']['genIdx']

      # If this particle just became hidden, remove it from our swarm counts
      if hidden and not wasHidden:
        assert (entryIdx in self._swarmIdToIndexes[swarmId])
        self._swarmIdToIndexes[swarmId].remove(entryIdx)
        self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1

      # Update the entry for the latest info
      entry['errScore']  = errScore
      entry['completed'] = completed
      entry['matured'] = matured
      entry['numRecords'] = numRecords
      entry['hidden'] = hidden

    # Update the particle best errScore
    particleId = modelParams['particleState']['id']
    genIdx = modelParams['particleState']['genIdx']
    if matured and not hidden:
      (oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
      if errScore < oldResult:
        pos = Particle.getPositionFromState(modelParams['particleState'])
        self._particleBest[particleId] = (errScore, pos)

    # Update the particle latest generation index
    prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
    if not hidden and genIdx > prevGenIdx:
      self._particleLatestGenIdx[particleId] = genIdx
    elif hidden and not wasHidden and genIdx == prevGenIdx:
      # The only model at this generation was just hidden; roll back.
      self._particleLatestGenIdx[particleId] = genIdx-1

    # Update the swarm best score
    if not hidden:
      swarmId = modelParams['particleState']['swarmId']
      if not swarmId in self._swarmBestOverall:
        self._swarmBestOverall[swarmId] = []

      bestScores = self._swarmBestOverall[swarmId]
      while genIdx >= len(bestScores):
        bestScores.append((None, numpy.inf))
      if errScore < bestScores[genIdx][1]:
        bestScores[genIdx] = (modelID, errScore)

    # Update the self._modifiedSwarmGens flags to support the
    #   getMaturedSwarmGenerations() call.
    if not hidden:
      key = (swarmId, genIdx)
      if not key in self._maturedSwarmGens:
        self._modifiedSwarmGens.add(key)

    return errScore
def getNumErrModels(self):
"""Return number of models that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: # if models
"""
return self._numErrModels
def getErrModelIds(self):
"""Return list of models IDs that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: # if models
"""
return list(self._errModels)
def getNumCompletedModels(self):
"""Return total number of models that completed.
Parameters:
---------------------------------------------------------------------
retval: # if models that completed
"""
return self._numCompletedModels
def getModelIDFromParamsHash(self, paramsHash):
""" Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
---------------------------------------------------------------------
paramsHash: paramsHash to look for
retval: modelId, or None if not found
"""
entryIdx = self. _paramsHashToIndexes.get(paramsHash, None)
if entryIdx is not None:
return self._allResults[entryIdx]['modelID']
else:
return None
def numModels(self, swarmId=None, includeHidden=False):
"""Return the total # of models we have in our database (if swarmId is
None) or in a specific swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders
in this swarm. For example '__address_encoder.__gym_encoder'
includeHidden: If False, this will only return the number of models
that are not hidden (i.e. orphanned, etc.)
retval: numModels
"""
# Count all models
if includeHidden:
if swarmId is None:
return len(self._allResults)
else:
return len(self._swarmIdToIndexes.get(swarmId, []))
# Only count non-hidden models
else:
if swarmId is None:
entries = self._allResults
else:
entries = [self._allResults[entryIdx]
for entryIdx in self._swarmIdToIndexes.get(swarmId,[])]
return len([entry for entry in entries if not entry['hidden']])
def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
"""Return the model ID of the model with the best result so far and
it's score on the optimize metric. If swarm is None, then it returns
the global best, otherwise it returns the best for the given swarm
for all generatons up to and including genIdx.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: consider the best in all generations up to and including this
generation if not None.
retval: (modelID, result)
"""
if swarmId is None:
return (self._bestModelID, self._bestResult)
else:
if swarmId not in self._swarmBestOverall:
return (None, numpy.inf)
# Get the best score, considering the appropriate generations
genScores = self._swarmBestOverall[swarmId]
bestModelId = None
bestScore = numpy.inf
for (i, (modelId, errScore)) in enumerate(genScores):
if genIdx is not None and i > genIdx:
break
if errScore < bestScore:
bestScore = errScore
bestModelId = modelId
return (bestModelId, bestScore)
def getParticleInfo(self, modelId):
"""Return particle info for a specific modelId.
Parameters:
---------------------------------------------------------------------
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured)
"""
entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'],
entry['completed'], entry['matured'])
  def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,
                       matured=None, lastDescendent=False):
    """Return a list of particleStates for all particles we know about in
    the given swarm, their model Ids, and metric results.

    Parameters:
    ---------------------------------------------------------------------
    swarmId:  A string representation of the sorted list of encoders in this
                 swarm. For example '__address_encoder.__gym_encoder'

    genIdx:  If not None, only return particles at this specific generation
                  index.

    completed:   If not None, only return particles of the given state (either
                completed if 'completed' is True, or running if 'completed'
                is false

    matured:   If not None, only return particles of the given state (either
                matured if 'matured' is True, or not matured if 'matured'
                is false. Note that any model which has completed is also
                considered matured.

    lastDescendent: If True, only return particles that are the last descendent,
                that is, the highest generation index for a given particle Id

    retval:  (particleStates, modelIds, errScores, completed, matured)
              particleStates: list of particleStates
              modelIds: list of modelIds
              errScores: list of errScores, numpy.inf is plugged in
                              if we don't have a result yet
              completed: list of completed booleans
              matured: list of matured booleans
    """
    # The indexes of all the models in this swarm. This list excludes hidden
    #  (orphaned) models.
    # NOTE: the per-swarm index list is maintained by update() to exclude
    #  hidden entries, whereas the full-database scan below must tolerate them.
    if swarmId is not None:
      entryIdxs = self._swarmIdToIndexes.get(swarmId, [])
    else:
      entryIdxs = range(len(self._allResults))
    if len(entryIdxs) == 0:
      return ([], [], [], [], [])

    # Get the particles of interest
    particleStates = []
    modelIds = []
    errScores = []
    completedFlags = []
    maturedFlags = []
    for idx in entryIdxs:
      entry = self._allResults[idx]

      # If this entry is hidden (i.e. it was an orphaned model), it should
      #  not be in this list
      if swarmId is not None:
        assert (not entry['hidden'])

      # Get info on this model
      modelParams = entry['modelParams']
      isCompleted = entry['completed']
      isMatured = entry['matured']
      particleState = modelParams['particleState']
      particleGenIdx = particleState['genIdx']
      particleId = particleState['id']

      # Apply each of the optional filters; a None filter means "don't care"
      if genIdx is not None and particleGenIdx != genIdx:
        continue

      if completed is not None and (completed != isCompleted):
        continue

      if matured is not None and (matured != isMatured):
        continue

      # Keep only the highest-generation instance of each particle when asked
      if lastDescendent \
              and (self._particleLatestGenIdx[particleId] != particleGenIdx):
        continue

      # Incorporate into return values
      particleStates.append(particleState)
      modelIds.append(entry['modelID'])
      errScores.append(entry['errScore'])
      completedFlags.append(isCompleted)
      maturedFlags.append(isMatured)

    return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
def getOrphanParticleInfos(self, swarmId, genIdx):
"""Return a list of particleStates for all particles in the given
swarm generation that have been orphaned.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
# Get info on this model
entry = self._allResults[idx]
if not entry['hidden']:
continue
modelParams = entry['modelParams']
if modelParams['particleState']['swarmId'] != swarmId:
continue
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
  def getMaturedSwarmGenerations(self):
    """Return a list of swarm generations that have completed and the
    best (minimal) errScore seen for each of them.

    NOTE: this call is stateful - each (swarmId, genIdx) is reported at most
    once, via the self._maturedSwarmGens / self._modifiedSwarmGens sets.

    Parameters:
    ---------------------------------------------------------------------
    retval:  list of tuples. Each tuple is of the form:
              (swarmId, genIdx, bestErrScore)
    """
    # Return results go in this list
    result = []


    # For each of the swarm generations which have had model result updates
    # since the last time we were called, see which have completed.
    modifiedSwarmGens = sorted(self._modifiedSwarmGens)

    # Walk through them in order from lowest to highest generation index
    for key in modifiedSwarmGens:
      (swarmId, genIdx) = key

      # Skip it if we've already reported on it. This should happen rarely, if
      #  ever. It means that some worker has started and completed a model in
      #  this generation after we've determined that the generation has ended.
      if key in self._maturedSwarmGens:
        self._modifiedSwarmGens.remove(key)
        continue

      # If the previous generation for this swarm is not complete yet, don't
      #  bother evaluating this one.
      if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
        continue

      # We found a swarm generation that had some results reported since last
      # time, see if it's complete or not
      (_, _, errScores, completedFlags, maturedFlags) = \
                                self.getParticleInfos(swarmId, genIdx)
      maturedFlags = numpy.array(maturedFlags)
      numMatured = maturedFlags.sum()
      # A generation is done when it holds at least the minimum number of
      #  particles for the swarm AND every one of them has matured.
      if numMatured >= self._hsObj._minParticlesPerSwarm \
            and numMatured == len(maturedFlags):
        errScores = numpy.array(errScores)
        bestScore = errScores.min()

        self._maturedSwarmGens.add(key)
        self._modifiedSwarmGens.remove(key)
        result.append((swarmId, genIdx, bestScore))

    # Return results
    return result
def firstNonFullGeneration(self, swarmId, minNumParticles):
""" Return the generation index of the first generation in the given
swarm that does not have numParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
minNumParticles: minium number of partices required for a full
generation.
retval: generation index, or None if no particles at all.
"""
if not swarmId in self._swarmNumParticlesPerGeneration:
return None
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
numPsPerGen = numpy.array(numPsPerGen)
firstNonFull = numpy.where(numPsPerGen < minNumParticles)[0]
if len(firstNonFull) == 0:
return len(numPsPerGen)
else:
return firstNonFull[0]
def highestGeneration(self, swarmId):
""" Return the generation index of the highest generation in the given
swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
retval: generation index
"""
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
return len(numPsPerGen)-1
def getParticleBest(self, particleId):
""" Return the best score and position for a given particle. The position
is given as a dict, with varName:varPosition items in it.
Parameters:
---------------------------------------------------------------------
particleId: which particle
retval: (bestResult, bestPosition)
"""
return self._particleBest.get(particleId, (None, None))
  def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
    """ Return a dict of the errors obtained on models that were run with
    each value from a PermuteChoice variable.

    For example, if a PermuteChoice variable has the following choices:
      ['a', 'b', 'c']

    The dict will have 3 elements. The keys are the stringified choiceVars,
    and each value is tuple containing (choiceVar, errors) where choiceVar is
    the original form of the choiceVar (before stringification) and errors is
    the list of errors received from models that used the specific choice:
    retval:
      ['a':('a', [0.1, 0.2, 0.3]), 'b':('b', [0.5, 0.1, 0.6]), 'c':('c', [])]


    Parameters:
    ---------------------------------------------------------------------
    swarmId:  swarm Id of the swarm to retrieve info from
    maxGenIdx:  max generation index to consider from other models, ignored
                  if None
    varName:  which variable to retrieve

    retval:  list of the errors obtained from each choice.
    """
    results = dict()
    # Get all the completed particles in this swarm
    (allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,
                                              genIdx=None, matured=True)

    for particleState, resultErr in itertools.izip(allParticles, resultErrs):
      # Consider this generation?
      if maxGenIdx is not None:
        if particleState['genIdx'] > maxGenIdx:
          continue

      # Ignore unless this model completed successfully
      # NOTE: numpy.inf is the placeholder errScore for models without a
      #  result yet, so this also filters out unfinished models
      if resultErr == numpy.inf:
        continue

      position = Particle.getPositionFromState(particleState)
      varPosition = position[varName]
      # Choices are keyed by their string form; the original (unstringified)
      #  value is kept alongside the accumulated error list
      varPositionStr = str(varPosition)
      if varPositionStr in results:
        results[varPositionStr][1].append(resultErr)
      else:
        results[varPositionStr] = (varPosition, [resultErr])

    return results
class HypersearchV2(object):
"""The v2 Hypersearch implementation. This is one example of a Hypersearch
implementation that can be used by the HypersearchWorker. Other implementations
just have to implement the following methods:
createModels()
recordModelProgress()
getPermutationVariables()
getComplexVariableLabelLookupDict()
This implementation uses a hybrid of Particle Swarm Optimization (PSO) and
the old "ronamatic" logic from Hypersearch V1. Variables which are lists of
choices (i.e. string values, integer values that represent different
categories) are searched using the ronamatic logic whereas floats and
integers that represent a range of values are searched using PSO.
For prediction experiments, this implementation starts out evaluating only
single encoder models that encode the predicted field. This is the first
"sprint". Once it finds the optimum set of variables for that, it starts to
build up by adding in combinations of 2 fields (the second "sprint"), where
one of them is the predicted field. Once the top 2-field combination(s) are
discovered, it starts to build up on those by adding in a 3rd field, etc.
Each new set of field combinations is called a sprint.
For classification experiments, this implementation starts out evaluating two
encoder models, where one of the encoders is the classified field. This is the
first "sprint". Once it finds the optimum set of variables for that, it starts
to build up by evaluating combinations of 3 fields (the second "sprint"), where
two of them are the best 2 fields found in the first sprint (one of those of
course being the classified field). Once the top 3-field combination(s) are
discovered, it starts to build up on those by adding in a 4th field, etc.
In classification models, the classified field, although it has an encoder, is
not sent "into" the network. Rather, the encoded value just goes directly to
the classifier as the classifier input.
At any one time, there are 1 or more swarms being evaluated at the same time -
each swarm representing a certain field combination within the sprint. We try
to load balance the swarms and have the same number of models evaluated for
each swarm at any one time. Each swarm contains N particles, and we also try
to keep N >= some minimum number. Each position of a particle corresponds to a
model.
When a worker is ready to evaluate a new model, it first picks the swarm with
the least number of models so far (least number of evaluated particle
positions). If that swarm does not have the min number of particles in it yet,
or does not yet have a particle created by this worker, the worker will create
a new particle, else it will choose another particle from that swarm that it
had created in the past which has the least number of evaluated positions so
far.
"""
  def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None,
               logLevel=None):
    """Instantiate the HypersearchV2 instance.

    Parameters:
    ----------------------------------------------------------------------
    searchParams: a dict of the job's search parameters. The format is:

      persistentJobGUID: REQUIRED.
                        Persistent, globally-unique identifier for this job
                        for use in constructing persistent model checkpoint
                        keys. MUST be compatible with S3 key-naming rules, but
                        MUST NOT contain forward slashes. This GUID is
                        expected to retain its global uniqueness across
                        clusters and cluster software updates (unlike the
                        record IDs in the Engine's jobs table, which recycle
                        upon table schema change and software update). In the
                        future, this may also be instrumental for checkpoint
                        garbage collection.

      permutationsPyFilename:
                        OPTIONAL - path to permutations.py file
      permutationsPyContents:
                        OPTIONAL - JSON encoded string with
                                    contents of permutations.py file
      descriptionPyContents:
                        OPTIONAL - JSON encoded string with
                                    contents of base description.py file
      description:      OPTIONAL - JSON description of the search
      createCheckpoints: OPTIONAL - Whether to create checkpoints
      useTerminators    OPTIONAL - True or False (default config.xml). When set
                                    to False, the model and swarm terminators
                                    are disabled
      maxModels:        OPTIONAL - max # of models to generate
                                  NOTE: This is a deprecated location for this
                                  setting. Now, it should be specified through
                                  the maxModels variable within the permutations
                                  file, or maxModels in the JSON description
      dummyModel:       OPTIONAL - Either (True/False) or a dict of parameters
                                    for a dummy model. If this key is absent,
                                    a real model is trained.
                                    See utils.py/OPFDummyModel runner for the
                                    schema of the dummy parameters
      speculativeParticles OPTIONAL - True or False (default obtained from
                                    nupic.hypersearch.speculative.particles.default
                                    configuration property). See note below.

      NOTE: The caller must provide just ONE of the following to describe the
      hypersearch:
        1.) permutationsPyFilename
        OR 2.) permutationsPyContents & permutationsPyContents
        OR 3.) description

      The schema for the description element can be found at:
       "py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json"

      NOTE about speculativeParticles: If true (not 0), hypersearch workers will
      go ahead and create and run particles in subsequent sprints and
      generations before the current generation or sprint has been completed. If
      false, a worker will wait in a sleep loop until the current generation or
      sprint has finished before choosing the next particle position or going
      into the next sprint. When true, the best model can be found faster, but
      results are less repeatable due to the randomness of when each worker
      completes each particle. This property can be overridden via the
      speculativeParticles element of the Hypersearch job params.


    workerID:   our unique Hypersearch worker ID

    cjDAO:      ClientJobsDB Data Access Object
    jobID:      job ID for this hypersearch job
    logLevel:   override logging level to this value, if not None
    """

    # Instantiate our logger
    self.logger = logging.getLogger(".".join( ['com.numenta',
                        self.__class__.__module__, self.__class__.__name__]))

    # Override log level?
    if logLevel is not None:
      self.logger.setLevel(logLevel)

    # This is how to check the logging level
    #if self.logger.getEffectiveLevel() <= logging.DEBUG:
    #  print "at debug level"

    # Init random seed
    # NOTE: fixed seed so that repeated runs of the same search are
    #  reproducible (modulo worker timing with speculative particles)
    random.seed(42)

    # Save the search info
    self._searchParams = searchParams
    self._workerID = workerID
    self._cjDAO = cjDAO
    self._jobID = jobID

    # Log search params
    self.logger.info("searchParams: \n%s" % (pprint.pformat(
        clippedObj(searchParams))))

    self._createCheckpoints = self._searchParams.get('createCheckpoints',
                                                     False)
    self._maxModels = self._searchParams.get('maxModels', None)
    # -1 is treated the same as "no limit"
    if self._maxModels == -1:
      self._maxModels = None
    self._predictionCacheMaxRecords = self._searchParams.get('predictionCacheMaxRecords', None)

    # Speculative particles?
    self._speculativeParticles = self._searchParams.get('speculativeParticles',
        bool(int(Configuration.get(
                        'nupic.hypersearch.speculative.particles.default'))))
    self._speculativeWaitSecondsMax = float(Configuration.get(
                    'nupic.hypersearch.speculative.particles.sleepSecondsMax'))

    # Maximum Field Branching
    self._maxBranching= int(Configuration.get(
                             'nupic.hypersearch.max.field.branching'))

    # Minimum Field Contribution
    self._minFieldContribution= float(Configuration.get(
                             'nupic.hypersearch.min.field.contribution'))

    # This gets set if we detect that the job got cancelled
    self._jobCancelled = False

    # Use terminators (typically set by permutations_runner.py)
    if 'useTerminators' in self._searchParams:
      useTerminators = self._searchParams['useTerminators']
      useTerminators = str(int(useTerminators))

      Configuration.set('nupic.hypersearch.enableModelTermination', useTerminators)
      Configuration.set('nupic.hypersearch.enableModelMaturity', useTerminators)
      Configuration.set('nupic.hypersearch.enableSwarmTermination', useTerminators)

    # Special test mode?
    if 'NTA_TEST_exitAfterNModels' in os.environ:
      self._maxModels = int(os.environ['NTA_TEST_exitAfterNModels'])

    self._dummyModel = self._searchParams.get('dummyModel', None)

    # Holder for temporary directory, if any, that needs to be cleaned up
    # in our close() method.
    self._tempDir = None
    try:
      # Get the permutations info. This can be either:
      #  1.) JSON encoded search description (this will be used to generate a
      #       permutations.py and description.py files using ExpGenerator)
      #  2.) path to a pre-generated permutations.py file. The description.py is
      #       assumed to be in the same directory
      #  3.) contents of the permutations.py and description.py files.
      if 'description' in self._searchParams:
        if ('permutationsPyFilename' in self._searchParams or
            'permutationsPyContents' in self._searchParams or
            'descriptionPyContents' in self._searchParams):
          raise RuntimeError(
              "Either 'description', 'permutationsPyFilename' or"
              "'permutationsPyContents' & 'permutationsPyContents' should be "
              "specified, but not two or more of these at once.")

        # Calculate training period for anomaly models
        searchParamObj = self._searchParams
        anomalyParams = searchParamObj['description'].get('anomalyParams',
          dict())

        # This is used in case searchParamObj['description']['anomalyParams']
        # is set to None.
        if anomalyParams is None:
          anomalyParams = dict()

        if (('autoDetectWaitRecords' not in anomalyParams) or
            (anomalyParams['autoDetectWaitRecords'] is None)):
          streamDef = self._getStreamDef(searchParamObj['description'])

          from nupic.data.stream_reader import StreamReader

          try:
            streamReader = StreamReader(streamDef, isBlocking=False,
                                           maxTimeout=0, eofOnTimeout=True)
            anomalyParams['autoDetectWaitRecords'] = \
              streamReader.getDataRowCount()
          except Exception:
            # Best-effort: if the stream can't be read here, let the model
            #  auto-detect later
            anomalyParams['autoDetectWaitRecords'] = None
          self._searchParams['description']['anomalyParams'] = anomalyParams


        # Call the experiment generator to generate the permutations and base
        # description file.
        outDir = self._tempDir = tempfile.mkdtemp()
        expGenerator([
          '--description=%s' % (
              json.dumps(self._searchParams['description'])),
          '--version=v2',
          '--outDir=%s' % (outDir)])

        # Get the name of the permutations script.
        permutationsScript = os.path.join(outDir, 'permutations.py')

      elif 'permutationsPyFilename' in self._searchParams:
        if ('description' in self._searchParams or
            'permutationsPyContents' in self._searchParams or
            'descriptionPyContents' in self._searchParams):
          raise RuntimeError(
              "Either 'description', 'permutationsPyFilename' or "
              "'permutationsPyContents' & 'permutationsPyContents' should be "
              "specified, but not two or more of these at once.")
        permutationsScript = self._searchParams['permutationsPyFilename']

      elif 'permutationsPyContents' in self._searchParams:
        if ('description' in self._searchParams or
            'permutationsPyFilename' in self._searchParams):
          raise RuntimeError(
              "Either 'description', 'permutationsPyFilename' or"
              "'permutationsPyContents' & 'permutationsPyContents' should be "
              "specified, but not two or more of these at once.")

        assert ('descriptionPyContents' in self._searchParams)
        # Generate the permutations.py and description.py files
        outDir = self._tempDir = tempfile.mkdtemp()
        permutationsScript = os.path.join(outDir, 'permutations.py')
        fd = open(permutationsScript, 'w')
        fd.write(self._searchParams['permutationsPyContents'])
        fd.close()
        fd = open(os.path.join(outDir, 'description.py'), 'w')
        fd.write(self._searchParams['descriptionPyContents'])
        fd.close()

      else:
        raise RuntimeError ("Either 'description' or 'permutationsScript' must be"
                            "specified")

      # Get the base path of the experiment and read in the base description
      self._basePath = os.path.dirname(permutationsScript)
      self._baseDescription = open(os.path.join(self._basePath,
                                               'description.py')).read()
      self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()

      # Read the model config to figure out the inference type
      modelDescription, _ = helpers.loadExperiment(self._basePath)

      # Read info from permutations file. This sets up the following member
      # variables:
      #   _predictedField
      #   _permutations
      #   _flattenedPermutations
      #   _encoderNames
      #   _reportKeys
      #   _filterFunc
      #   _optimizeKey
      #   _maximize
      #   _dummyModelParamsFunc
      self._readPermutationsFile(permutationsScript, modelDescription)

      # Fill in and save the base description and permutations file contents
      #  if they haven't already been filled in by another worker
      if self._cjDAO is not None:
        updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
                                                 fieldName='genBaseDescription',
                                                 curValue=None,
                                                 newValue = self._baseDescription)
        if updated:
          permContents = open(permutationsScript).read()
          self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
                                         fieldName='genPermutations',
                                         curValue=None,
                                         newValue = permContents)

      # if user provided an artificialMetric, force use of the dummy model
      if self._dummyModelParamsFunc is not None:
        if self._dummyModel is None:
          self._dummyModel = dict()

      # If at DEBUG log level, print out permutations info to the log
      if self.logger.getEffectiveLevel() <= logging.DEBUG:
        msg = StringIO.StringIO()
        print >> msg, "Permutations file specifications: "
        info = dict()
        for key in ['_predictedField', '_permutations',
                    '_flattenedPermutations', '_encoderNames',
                    '_reportKeys', '_optimizeKey', '_maximize']:
          info[key] = getattr(self, key)
        print >> msg, pprint.pformat(info)
        self.logger.debug(msg.getvalue())
        msg.close()

      # Instantiate our database to hold the results we received so far
      self._resultsDB = ResultsDB(self)

      # Instantiate the Swarm Terminator
      self._swarmTerminator = SwarmTerminator()

      # Initial hypersearch state
      self._hsState = None

      # The Max # of attempts we will make to create a unique model before
      #  giving up.
      self._maxUniqueModelAttempts = int(Configuration.get(
                                      'nupic.hypersearch.maxUniqueModelAttempts'))

      # The max amount of time allowed before a model is considered orphaned.
      self._modelOrphanIntervalSecs = float(Configuration.get(
                                      'nupic.hypersearch.modelOrphanIntervalSecs'))

      # The max percent of models that can complete with errors
      self._maxPctErrModels = float(Configuration.get(
                                      'nupic.hypersearch.maxPctErrModels'))

    except:
      # Clean up our temporary directory, if any
      if self._tempDir is not None:
        shutil.rmtree(self._tempDir)
        self._tempDir = None

      raise

    return
def _getStreamDef(self, modelDescription):
"""
Generate stream definition based on
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef
def __del__(self):
"""Destructor; NOTE: this is not guaranteed to be called (bugs like
circular references could prevent it from being called).
"""
self.close()
return
def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug("Removing temporary directory %r", self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return
def _readPermutationsFile(self, filename, modelDescription):
"""
Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate HTMPredictionModel results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None
"""
# Open and execute the permutations file
vars = {}
permFile = execfile(filename, globals(), vars)
# Read in misc info.
self._reportKeys = vars.get('report', [])
self._filterFunc = vars.get('permutationFilter', None)
self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
self._predictedField = None # default
self._predictedFieldEncoder = None # default
self._fixedFields = None # default
# The fastSwarm variable, if present, contains the params from a best
# model from a previous swarm. If present, use info from that to seed
# a fast swarm
self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
if self._fastSwarmModelParams is not None:
encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
['sensorParams']['encoders']
self._fixedFields = []
for fieldName in encoders:
if encoders[fieldName] is not None:
self._fixedFields.append(fieldName)
if 'fixedFields' in vars:
self._fixedFields = vars['fixedFields']
# Get min number of particles per swarm from either permutations file or
# config.
self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
if self._minParticlesPerSwarm == None:
self._minParticlesPerSwarm = Configuration.get(
'nupic.hypersearch.minParticlesPerSwarm')
self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
# Enable logic to kill off speculative swarms when an earlier sprint
# has found that it contains poorly performing field combination?
self._killUselessSwarms = vars.get('killUselessSwarms', True)
# The caller can request that the predicted field ALWAYS be included ("yes")
# or optionally include ("auto"). The setting of "no" is N/A and ignored
# because in that case the encoder for the predicted field will not even
# be present in the permutations file.
# When set to "yes", this will force the first sprint to try the predicted
# field only (the legacy mode of swarming).
# When set to "auto", the first sprint tries all possible fields (one at a
# time) in the first sprint.
self._inputPredictedField = vars.get("inputPredictedField", "yes")
# Try all possible 3-field combinations? Normally, we start with the best
# 2-field combination as a base. When this flag is set though, we try
# all possible 3-field combinations which takes longer but can find a
# better model.
self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
# Always include timestamp fields in the 3-field swarms?
# This is a less compute intensive version of tryAll3FieldCombinations.
# Instead of trying ALL possible 3 field combinations, it just insures
# that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
# out when generating the 3-field swarms.
self._tryAll3FieldCombinationsWTimestamps = vars.get(
'tryAll3FieldCombinationsWTimestamps', False)
# Allow the permutations file to override minFieldContribution. This would
# be set to a negative number for large swarms so that you don't disqualify
# a field in an early sprint just because it did poorly there. Sometimes,
# a field that did poorly in an early sprint could help accuracy when
# added in a later sprint
minFieldContribution = vars.get('minFieldContribution', None)
if minFieldContribution is not None:
self._minFieldContribution = minFieldContribution
# Allow the permutations file to override maxBranching.
maxBranching = vars.get('maxFieldBranching', None)
if maxBranching is not None:
self._maxBranching = maxBranching
# Read in the optimization info.
if 'maximize' in vars:
self._optimizeKey = vars['maximize']
self._maximize = True
elif 'minimize' in vars:
self._optimizeKey = vars['minimize']
self._maximize = False
else:
raise RuntimeError("Permutations file '%s' does not include a maximize"
" or minimize metric.")
# The permutations file is the new location for maxModels. The old location,
# in the jobParams is deprecated.
maxModels = vars.get('maxModels')
if maxModels is not None:
if self._maxModels is None:
self._maxModels = maxModels
else:
raise RuntimeError('It is an error to specify maxModels both in the job'
' params AND in the permutations file.')
# Figure out if what kind of search this is:
#
# If it's a temporal prediction search:
# the first sprint has 1 swarm, with just the predicted field
# elif it's a spatial prediction search:
# the first sprint has N swarms, each with predicted field + one
# other field.
# elif it's a classification search:
# the first sprint has N swarms, each with 1 field
inferenceType = modelDescription['modelParams']['inferenceType']
if not InferenceType.validate(inferenceType):
raise ValueError("Invalid inference type %s" %inferenceType)
if inferenceType in [InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep]:
# If it does not have a separate encoder for the predicted field that
# goes to the classifier, it is a legacy multi-step network
classifierOnlyEncoder = None
for encoder in modelDescription["modelParams"]["sensorParams"]\
["encoders"].values():
if encoder.get("classifierOnly", False) \
and encoder["fieldname"] == vars.get('predictedField', None):
classifierOnlyEncoder = encoder
break
if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
# If we don't have a separate encoder for the classifier (legacy
# MultiStep) or the caller explicitly wants to include the predicted
# field, then use the legacy temporal search methodology.
self._searchType = HsSearchType.legacyTemporal
else:
self._searchType = HsSearchType.temporal
elif inferenceType in [InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly]:
self._searchType = HsSearchType.legacyTemporal
elif inferenceType in (InferenceType.TemporalClassification,
InferenceType.NontemporalClassification):
self._searchType = HsSearchType.classification
else:
raise RuntimeError("Unsupported inference type: %s" % inferenceType)
# Get the predicted field. Note that even classification experiments
# have a "predicted" field - which is the field that contains the
# classification value.
self._predictedField = vars.get('predictedField', None)
if self._predictedField is None:
raise RuntimeError("Permutations file '%s' does not have the required"
" 'predictedField' variable" % filename)
# Read in and validate the permutations dict
if 'permutations' not in vars:
raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
if not isinstance(vars['permutations'], dict):
raise RuntimeError("Permutations file '%s' defines a permutations variable "
"but it is not a dict")
self._encoderNames = []
self._permutations = vars['permutations']
self._flattenedPermutations = dict()
def _flattenPermutations(value, keys):
if ':' in keys[-1]:
raise RuntimeError("The permutation variable '%s' contains a ':' "
"character, which is not allowed.")
flatKey = _flattenKeys(keys)
if isinstance(value, PermuteEncoder):
self._encoderNames.append(flatKey)
# If this is the encoder for the predicted field, save its name.
if value.fieldName == self._predictedField:
self._predictedFieldEncoder = flatKey
# Store the flattened representations of the variables within the
# encoder.
for encKey, encValue in value.kwArgs.iteritems():
if isinstance(encValue, PermuteVariable):
self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
elif isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
else:
if isinstance(value, PermuteVariable):
self._flattenedPermutations[key] = value
rApply(self._permutations, _flattenPermutations)
def getExpectedNumModels(self):
"""Computes the number of models that are expected to complete as part of
this instances's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of perutations_runner.py for use in progress
reporting.
Parameters:
---------------------------------------------------------
retval: The total number of expected models, if known; -1 if unknown
"""
return -1
def getModelNames(self):
"""Generates a list of model names that are expected to complete as part of
this instances's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of perutations_runner.py.
Parameters:
---------------------------------------------------------
retval: List of model names for this HypersearchV2 instance, or
None of not applicable
"""
return None
def getPermutationVariables(self):
"""Returns a dictionary of permutation variables.
Parameters:
---------------------------------------------------------
retval: A dictionary of permutation variables; keys are
flat permutation variable names and each value is
a sub-class of PermuteVariable.
"""
return self._flattenedPermutations
def getComplexVariableLabelLookupDict(self):
"""Generates a lookup dictionary of permutation variables whose values
are too complex for labels, so that artificial labels have to be generated
for them.
Parameters:
---------------------------------------------------------
retval: A look-up dictionary of permutation
variables whose values are too complex for labels, so
artificial labels were generated instead (e.g., "Choice0",
"Choice1", etc.); the key is the name of the complex variable
and the value is:
dict(labels=<list_of_labels>, values=<list_of_values>).
"""
raise NotImplementedError
def getOptimizationMetricInfo(self):
"""Retrives the optimization key name and optimization function.
Parameters:
---------------------------------------------------------
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
"""
return (self._optimizeKey, self._maximize)
  def _checkForOrphanedModels (self):
    """If there are any models that haven't been updated in a while, consider
    them dead, and mark them as hidden in our resultsDB. We also change the
    paramsHash and particleHash of orphaned models so that we can
    re-generate that particle and/or model again if we desire.

    Parameters:
    ----------------------------------------------------------------------
    retval:       None
    """
    self.logger.debug("Checking for orphaned models older than %s" % \
                     (self._modelOrphanIntervalSecs))

    # Keep adopting orphans one at a time until the DAO reports none left.
    while True:
      orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
                                                self._modelOrphanIntervalSecs)
      if orphanedModelId is None:
        return

      self.logger.info("Removing orphaned model: %d" % (orphanedModelId))

      # Change the model hash and params hash as stored in the models table so
      #  that we can insert a new model with the same paramsHash.
      # The attempt counter is mixed into the hash so that a collision with an
      # existing row can be retried with a different value (up to 100 tries).
      for attempt in range(100):
        paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
                                                         attempt)).digest()
        particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
                                                         attempt)).digest()
        try:
          self._cjDAO.modelSetFields(orphanedModelId,
                                   dict(engParamsHash=paramsHash,
                                        engParticleHash=particleHash))
          success = True
        except:
          # NOTE(review): bare except presumably targets a DB uniqueness
          # violation on the hash columns — confirm against the DAO.
          success = False
        if success:
          break
      if not success:
        raise RuntimeError("Unexpected failure to change paramsHash and "
                           "particleHash of orphaned model")

      # Mark this model as complete, with reason "orphaned"
      self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
                    completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
                    completionMsg="Orphaned")

      # Update our results DB immediately, rather than wait for the worker
      #  to inform us. This insures that the getParticleInfos() calls we make
      #  below don't include this particle. Setting the metricResult to None
      #  sets it to worst case
      self._resultsDB.update(modelID=orphanedModelId,
                             modelParams=None,
                             modelParamsHash=paramsHash,
                             metricResult=None,
                             completed = True,
                             completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
                             matured = True,
                             numRecords = 0)
  def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
    """
    Periodically, check to see if we should remove a certain field combination
    from evaluation (because it is doing so poorly) or move on to the next
    sprint (add in more fields).

    This method is called from _getCandidateParticleAndSwarm(), which is called
    right before we try and create a new model to run.

    Parameters:
    -----------------------------------------------------------------------
    exhaustedSwarmId: If not None, force a change to the current set of active
                    swarms by removing this swarm. This is used in situations
                    where we can't find any new unique models to create in
                    this swarm. In these situations, we update the hypersearch
                    state regardless of the timestamp of the last time another
                    worker updated it.
    """
    if self._hsState is None:
      self._hsState = HsState(self)

    # Read in current state from the DB
    self._hsState.readStateFromDB()

    # This will hold the list of completed swarms that we find
    completedSwarms = set()

    # Mark the exhausted swarm as completing/completed, if any
    if exhaustedSwarmId is not None:
      self.logger.info("Removing swarm %s from the active set "
                       "because we can't find any new unique particle "
                       "positions" % (exhaustedSwarmId))
      # Is it completing or completed? 'completing' if any particles in it
      # have not yet matured.
      (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
                                      swarmId=exhaustedSwarmId, matured=False)
      if len(particles) > 0:
        exhaustedSwarmStatus = 'completing'
      else:
        exhaustedSwarmStatus = 'completed'

    # Kill all swarms that don't need to be explored based on the most recent
    # information.
    if self._killUselessSwarms:
      self._hsState.killUselessSwarms()

    # For all swarms that were in the 'completing' state, see if they have
    # completed yet.
    #
    # Note that we are not quite sure why this doesn't automatically get handled
    # when we receive notification that a model finally completed in a swarm.
    # But, we ARE running into a situation, when speculativeParticles is off,
    # where we have one or more swarms in the 'completing' state even though all
    # models have since finished. This logic will serve as a failsafe against
    # this situation.
    completingSwarms = self._hsState.getCompletingSwarms()
    for swarmId in completingSwarms:
      # Is it completed?
      (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
                                      swarmId=swarmId, matured=False)
      if len(particles) == 0:
        completedSwarms.add(swarmId)

    # Are there any swarms we can remove (because they have matured)?
    completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
    priorCompletedSwarms = self._hsState.getCompletedSwarms()
    for (swarmId, genIdx, errScore) in completedSwarmGens:
      # Don't need to report it if the swarm already completed
      if swarmId in priorCompletedSwarms:
        continue

      completedList = self._swarmTerminator.recordDataPoint(
          swarmId=swarmId, generation=genIdx, errScore=errScore)

      # Update status message
      statusMsg = "Completed generation #%d of swarm '%s' with a best" \
                  " errScore of %g" % (genIdx, swarmId, errScore)
      if len(completedList) > 0:
        statusMsg = "%s. Matured swarm(s): %s" % (statusMsg, completedList)
      self.logger.info(statusMsg)
      self._cjDAO.jobSetFields (jobID=self._jobID,
                                fields=dict(engStatus=statusMsg),
                                useConnectionID=False,
                                ignoreUnchanged=True)

      # Special test mode to check which swarms have terminated. Loops until
      # the optimistic-concurrency write (jobSetFieldIfEqual) succeeds or the
      # stored value already matches.
      if 'NTA_TEST_recordSwarmTerminations' in os.environ:
        while True:
          resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
          if resultsStr is None:
            results = {}
          else:
            results = json.loads(resultsStr)
          if not 'terminatedSwarms' in results:
            results['terminatedSwarms'] = {}
          for swarm in completedList:
            if swarm not in results['terminatedSwarms']:
              results['terminatedSwarms'][swarm] = (genIdx,
                                    self._swarmTerminator.swarmScores[swarm])

          newResultsStr = json.dumps(results)
          if newResultsStr == resultsStr:
            break
          updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
                                                   fieldName='results',
                                                   curValue=resultsStr,
                                                   newValue = json.dumps(results))
          if updated:
            break

      if len(completedList) > 0:
        for name in completedList:
          self.logger.info("Swarm matured: %s. Score at generation %d: "
                           "%s" % (name, genIdx, errScore))
        completedSwarms = completedSwarms.union(completedList)

    if len(completedSwarms)==0 and (exhaustedSwarmId is None):
      return

    # We need to mark one or more swarms as completed, keep trying until
    # successful, or until some other worker does it for us.
    while True:

      if exhaustedSwarmId is not None:
        self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)

      # Mark the completed swarms as completed
      for swarmId in completedSwarms:
        self._hsState.setSwarmState(swarmId, 'completed')

      # If nothing changed, we're done
      if not self._hsState.isDirty():
        return

      # Update the shared Hypersearch state now
      # This will do nothing and return False if some other worker beat us to it
      success = self._hsState.writeStateToDB()

      if success:
        # Go through and cancel all models that are still running, except for
        # the best model. Once the best model changes, the one that used to be
        # best (and has matured) will notice that and stop itself at that point.
        jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
        if jobResultsStr is not None:
          jobResults = json.loads(jobResultsStr)
          bestModelId = jobResults.get('bestModel', None)
        else:
          bestModelId = None

        for swarmId in list(completedSwarms):
          (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
                                          swarmId=swarmId, completed=False)
          if bestModelId in modelIds:
            modelIds.remove(bestModelId)
          if len(modelIds) == 0:
            continue
          self.logger.info("Killing the following models in swarm '%s' because"
                           "the swarm is being terminated: %s" % (swarmId,
                            str(modelIds)))
          for modelId in modelIds:
            self._cjDAO.modelSetFields(modelId,
                    dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
                    ignoreUnchanged = True)
        return

      # We were not able to change the state because some other worker beat us
      # to it.
      # Get the new state, and try again to apply our changes.
      self._hsState.readStateFromDB()
      self.logger.debug("New hsState has been set by some other worker to: "
                        " \n%s" % (pprint.pformat(self._hsState._state, indent=4)))
  def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
    """Find or create a candidate particle to produce a new model.

    At any one time, there is an active set of swarms in the current sprint, where
    each swarm in the sprint represents a particular combination of fields.
    Ideally, we should try to balance the number of models we have evaluated for
    each swarm at any time.

    This method will see how many models have been evaluated for each active
    swarm in the current active sprint(s) and then try and choose a particle
    from the least represented swarm in the first possible active sprint, with
    the following constraints/rules:

    for each active sprint:
      for each active swarm (preference to those with least# of models so far):
        1.) The particle will be created from new (generation #0) if there are not
        already self._minParticlesPerSwarm particles in the swarm.

        2.) Find the first gen that has a completed particle and evolve that
        particle to the next generation.

        3.) If we got to here, we know that we have satisfied the min# of
        particles for the swarm, and they are all currently running (probably at
        various generation indexes). Go onto the next swarm

      If we couldn't find a swarm to allocate a particle in, go onto the next
      sprint and start allocating particles there....

    Parameters:
    ----------------------------------------------------------------
    exhaustedSwarmId: If not None, force a change to the current set of active
                    swarms by marking this swarm as either 'completing' or
                    'completed'. If there are still models being evaluaed in
                    it, mark it as 'completing', else 'completed. This is
                    used in situations where we can't find any new unique
                    models to create in this swarm. In these situations, we
                    force an update to the hypersearch state so no other
                    worker wastes time try to use this swarm.

    retval: (exit, particle, swarm)
              exit: If true, this worker is ready to exit (particle and
                      swarm will be None)
              particle: Which particle to run
              swarm: which swarm the particle is in

              NOTE: When particle and swarm are None and exit is False, it
              means that we need to wait for one or more other worker(s) to
              finish their respective models before we can pick a particle
              to run. This will generally only happen when speculativeParticles
              is set to False.
    """
    # Cancel search?
    jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
    if jobCancel:
      self._jobCancelled = True
      # Did a worker cancel the job because of an error?
      (workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
          ['workerCompletionReason', 'workerCompletionMsg'])
      if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
        self.logger.info("Exiting due to job being cancelled")
        self._cjDAO.jobSetFields(self._jobID,
              dict(workerCompletionMsg="Job was cancelled"),
              useConnectionID=False, ignoreUnchanged=True)
      else:
        self.logger.error("Exiting because some worker set the "
              "workerCompletionReason to %s. WorkerCompletionMsg: %s" %
              (workerCmpReason, workerCmpMsg))
      return (True, None, None)

    # Perform periodic updates on the Hypersearch state.
    if self._hsState is not None:
      priorActiveSwarms = self._hsState.getActiveSwarms()
    else:
      priorActiveSwarms = None

    # Update the HypersearchState, checking for matured swarms, and marking
    #  the passed in swarm as exhausted, if any
    self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)

    # The above call may have modified self._hsState['activeSwarmIds']
    # Log the current set of active swarms
    activeSwarms = self._hsState.getActiveSwarms()
    if activeSwarms != priorActiveSwarms:
      self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
                                                        priorActiveSwarms))
    self.logger.debug("Active swarms: %s" % (activeSwarms))

    # If too many model errors were detected, exit.
    # The > 5 floor avoids tripping the error-percentage check on a tiny
    # sample of completed models.
    totalCmpModels = self._resultsDB.getNumCompletedModels()
    if totalCmpModels > 5:
      numErrs = self._resultsDB.getNumErrModels()
      if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
        # Get one of the errors
        errModelIds = self._resultsDB.getErrModelIds()
        resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
        modelErrMsg = resInfo.completionMsg
        cmpMsg = "%s: Exiting due to receiving too many models failing" \
                 " from exceptions (%d out of %d). \nModel Exception: %s" % \
                  (ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
                   modelErrMsg)
        self.logger.error(cmpMsg)

        # Cancel the entire job now, if it has not already been cancelled
        workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
            ['workerCompletionReason'])[0]
        if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
          self._cjDAO.jobSetFields(
              self._jobID,
              fields=dict(
                      cancel=True,
                      workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
                      workerCompletionMsg = cmpMsg),
              useConnectionID=False,
              ignoreUnchanged=True)
        return (True, None, None)

    # If HsState thinks the search is over, exit. It is seeing if the results
    #  on the sprint we just completed are worse than a prior sprint.
    if self._hsState.isSearchOver():
      cmpMsg = "Exiting because results did not improve in most recently" \
               " completed sprint."
      self.logger.info(cmpMsg)
      self._cjDAO.jobSetFields(self._jobID,
            dict(workerCompletionMsg=cmpMsg),
            useConnectionID=False, ignoreUnchanged=True)
      return (True, None, None)

    # Search successive active sprints, until we can find a candidate particle
    #   to work with
    sprintIdx = -1
    while True:
      # Is this sprint active?
      sprintIdx += 1
      (active, eos) = self._hsState.isSprintActive(sprintIdx)

      # If no more sprints to explore:
      if eos:
        # If any prior ones are still being explored, finish up exploring them
        if self._hsState.anyGoodSprintsActive():
          self.logger.info("No more sprints to explore, waiting for prior"
                         " sprints to complete")
          return (False, None, None)
        # Else, we're done
        else:
          cmpMsg = "Exiting because we've evaluated all possible field " \
                           "combinations"
          self._cjDAO.jobSetFields(self._jobID,
                                   dict(workerCompletionMsg=cmpMsg),
                                   useConnectionID=False, ignoreUnchanged=True)
          self.logger.info(cmpMsg)
          return (True, None, None)

      if not active:
        if not self._speculativeParticles:
          if not self._hsState.isSprintCompleted(sprintIdx):
            self.logger.info("Waiting for all particles in sprint %d to complete"
                  "before evolving any more particles" % (sprintIdx))
            return (False, None, None)
        continue

      # ====================================================================
      # Look for swarms that have particle "holes" in their generations. That is,
      #  an earlier generation with less than minParticlesPerSwarm. This can
      #  happen if a model that was started eariler got orphaned. If we detect
      #  this, start a new particle in that generation.
      swarmIds = self._hsState.getActiveSwarms(sprintIdx)
      for swarmId in swarmIds:
        firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
                                swarmId=swarmId,
                                minNumParticles=self._minParticlesPerSwarm)
        if firstNonFullGenIdx is None:
          continue
        if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
          self.logger.info("Cloning an earlier model in generation %d of swarm "
              "%s (sprintIdx=%s) to replace an orphaned model" % (
                firstNonFullGenIdx, swarmId, sprintIdx))

          # Clone a random orphaned particle from the incomplete generation
          (allParticles, allModelIds, errScores, completed, matured) = \
            self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)

          if len(allModelIds) > 0:
            # We have seen instances where we get stuck in a loop incessantly
            #  trying to clone earlier models (NUP-1511). My best guess is that
            #  we've already successfully cloned each of the orphaned models at
            #  least once, but still need at least one more. If we don't create
            #  a new particleID, we will never be able to instantiate another
            #  model (since particleID hash is a unique key in the models table).
            #  So, on 1/8/2013 this logic was changed to create a new particleID
            #  whenever we clone an orphan.
            newParticleId = True
            self.logger.info("Cloning an orphaned model")

          # If there is no orphan, clone one of the other particles. We can
          #  have no orphan if this was a speculative generation that only
          #  continued particles completed in the prior generation.
          else:
            newParticleId = True
            self.logger.info("No orphans found, so cloning a non-orphan")
            (allParticles, allModelIds, errScores, completed, matured) = \
            self._resultsDB.getParticleInfos(swarmId=swarmId,
                                             genIdx=firstNonFullGenIdx)

          # Clone that model
          modelId = random.choice(allModelIds)
          self.logger.info("Cloning model %r" % (modelId))
          (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
          particle = Particle(hsObj = self,
                              resultsDB = self._resultsDB,
                              flattenedPermuteVars=self._flattenedPermutations,
                              newFromClone=particleState,
                              newParticleId=newParticleId)
          return (False, particle, swarmId)

      # ====================================================================
      # Sort the swarms in priority order, trying the ones with the least
      #  number of models first
      swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
      swarmSizeAndIdList = zip(swarmSizes, swarmIds)
      swarmSizeAndIdList.sort()
      for (_, swarmId) in swarmSizeAndIdList:

        # -------------------------------------------------------------------
        # 1.) The particle will be created from new (at generation #0) if there
        #   are not already self._minParticlesPerSwarm particles in the swarm.
        (allParticles, allModelIds, errScores, completed, matured) = (
              self._resultsDB.getParticleInfos(swarmId))
        if len(allParticles) < self._minParticlesPerSwarm:
          particle = Particle(hsObj=self,
                              resultsDB=self._resultsDB,
                              flattenedPermuteVars=self._flattenedPermutations,
                              swarmId=swarmId,
                              newFarFrom=allParticles)

          # Jam in the best encoder state found from the first sprint
          bestPriorModel = None
          if sprintIdx >= 1:
            (bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)

          if bestPriorModel is not None:
            self.logger.info("Best model and errScore from previous sprint(%d):"
                              " %s, %g" % (0, str(bestPriorModel), errScore))
            (baseState, modelId, errScore, completed, matured) \
                 = self._resultsDB.getParticleInfo(bestPriorModel)
            particle.copyEncoderStatesFrom(baseState)

            # Copy the best inference type from the earlier sprint
            particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])

            # It's best to jiggle the best settings from the prior sprint, so
            #  compute a new position starting from that previous best
            # Only jiggle the vars we copied from the prior model
            # (encoder variables are identified by a ':' in their flat name).
            whichVars = []
            for varName in baseState['varStates']:
              if ':' in varName:
                whichVars.append(varName)
            particle.newPosition(whichVars)

            self.logger.debug("Particle after incorporating encoder vars from best "
                             "model in previous sprint: \n%s" % (str(particle)))

          return (False, particle, swarmId)

        # -------------------------------------------------------------------
        # 2.) Look for a completed particle to evolve
        # Note that we use lastDescendent. We only want to evolve particles that
        # are at their most recent generation index.
        (readyParticles, readyModelIds, readyErrScores, _, _) = (
            self._resultsDB.getParticleInfos(swarmId, genIdx=None,
                                             matured=True, lastDescendent=True))

        # If we have at least 1 ready particle to evolve...
        if len(readyParticles) > 0:
          readyGenIdxs = [x['genIdx'] for x in readyParticles]
          sortedGenIdxs = sorted(set(readyGenIdxs))
          genIdx = sortedGenIdxs[0]

          # Now, genIdx has the generation of the particle we want to run,
          # Get a particle from that generation and evolve it.
          useParticle = None
          for particle in readyParticles:
            if particle['genIdx'] == genIdx:
              useParticle = particle
              break

          # If speculativeParticles is off, we don't want to evolve a particle
          #  into the next generation until all particles in the current
          #  generation have completed.
          if not self._speculativeParticles:
            (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
                    swarmId, genIdx=genIdx, matured=False)
            if len(particles) > 0:
              continue

          particle = Particle(hsObj=self,
                              resultsDB=self._resultsDB,
                              flattenedPermuteVars=self._flattenedPermutations,
                              evolveFromState=useParticle)
          return (False, particle, swarmId)

        # END: for (swarmSize, swarmId) in swarmSizeAndIdList:
        # No success in this swarm, onto next swarm

      # ====================================================================
      # We couldn't find a particle in this sprint ready to evolve. If
      #  speculative particles is OFF, we have to wait for one or more other
      #  workers to finish up their particles before we can do anything.
      if not self._speculativeParticles:
        self.logger.info("Waiting for one or more of the %s swarms "
            "to complete a generation before evolving any more particles" \
            % (str(swarmIds)))
        return (False, None, None)

      # END: while True:
      # No success in this sprint, into next sprint
  def _okToExit(self):
    """Test if it's OK to exit this worker. This is only called when we run
    out of prospective new models to evaluate. This method sees if all models
    have matured yet. If not, it will sleep for a bit and return False. This
    will indicate to the hypersearch worker that we should keep running, and
    check again later. This gives this worker a chance to pick up and adopt any
    model which may become orphaned by another worker before it matures.

    If all models have matured, this method will send a STOP message to all
    matured, running models (presumably, there will be just one - the model
    which thinks it's the best) before returning True.

    retval: True if the worker may exit, False if it should keep polling.
    """
    # Send an update status periodically to the JobTracker so that it doesn't
    # think this worker is dead.
    print >> sys.stderr, "reporter:status:In hypersearchV2: _okToExit"

    # Any immature models still running? If so, sleep briefly and come back
    # later so we get a chance to adopt orphans. (Skipped if the job was
    # cancelled - then we just want to stop everything and leave.)
    if not self._jobCancelled:
      (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
      if len(modelIds) > 0:
        self.logger.info("Ready to end hyperseach, but not all models have " \
                         "matured yet. Sleeping a bit to wait for all models " \
                         "to mature.")
        # Sleep for a bit, no need to check for orphaned models very often.
        # The random factor spreads out polling across workers.
        time.sleep(5.0 * random.random())
        return False

    # All particles have matured, send a STOP signal to any that are still
    # running (e.g. the current best model kept alive for predictions).
    (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
    for modelId in modelIds:
      self.logger.info("Stopping model %d because the search has ended" \
                          % (modelId))
      self._cjDAO.modelSetFields(modelId,
                      dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),
                      ignoreUnchanged = True)

    # Update the HsState to get the accurate field contributions.
    self._hsStatePeriodicUpdate()
    pctFieldContributions, absFieldContributions = \
                                        self._hsState.getFieldContributions()

    # Update the job's results field with the new field contributions.
    jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
    if jobResultsStr is not None:
      jobResults = json.loads(jobResultsStr)
    else:
      jobResults = {}

    # Update the fieldContributions field, but only write back if it actually
    # changed; use a compare-and-swap so a concurrent worker's update wins.
    if pctFieldContributions != jobResults.get('fieldContributions', None):
      jobResults['fieldContributions'] = pctFieldContributions
      jobResults['absoluteFieldContributions'] = absFieldContributions

      isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,
                                                   fieldName='results',
                                                   curValue=jobResultsStr,
                                                   newValue=json.dumps(jobResults))
      if isUpdated:
        self.logger.info('Successfully updated the field contributions:%s',
                                                          pctFieldContributions)
      else:
        self.logger.info('Failed updating the field contributions, ' \
                         'another hypersearch worker must have updated it')

    return True
def killSwarmParticles(self, swarmID):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmID, completed=False)
for modelId in modelIds:
self.logger.info("Killing the following models in swarm '%s' because"
"the swarm is being terminated: %s" % (swarmID,
str(modelIds)))
self._cjDAO.modelSetFields(
modelId, dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged=True)
  def createModels(self, numModels=1):
    """Create one or more new models for evaluation. These should NOT be models
    that we already know are in progress (i.e. those that have been sent to us
    via recordModelProgress). We return a list of models to the caller
    (HypersearchWorker) and if one can be successfully inserted into
    the models table (i.e. it is not a duplicate) then HypersearchWorker will
    turn around and call our runModel() method, passing in this model. If it
    is a duplicate, HypersearchWorker will call this method again. A model
    is a duplicate if either the modelParamsHash or particleHash is
    identical to another entry in the model table.

    The numModels is provided by HypersearchWorker as a suggestion as to how
    many models to generate. This particular implementation only ever returns 1
    model.

    Before choosing some new models, we first do a sweep for any models that
    may have been abandoned by failed workers. If/when we detect an abandoned
    model, we mark it as complete and orphaned and hide it from any subsequent
    queries to our ResultsDB. This effectively considers it as if it never
    existed. We also change the paramsHash and particleHash in the model record
    of the models table so that we can create another model with the same
    params and particle status and run it (which we then do immediately).

    The modelParamsHash returned for each model should be a hash (max allowed
    size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
    its params and the optional particleHash should be a hash of the particleId
    and generation index. Every model that gets placed into the models database,
    either by this worker or another worker, will have these hashes computed for
    it. The recordModelProgress gets called for every model in the database and
    the hash is used to tell which, if any, are the same as the ones this worker
    generated.

    NOTE: We check first ourselves for possible duplicates using the paramsHash
    before we return a model. If HypersearchWorker failed to insert it (because
    some other worker beat us to it), it will turn around and call our
    recordModelProgress with that other model so that we now know about it. It
    will then call createModels() again.

    This method returns an exit boolean and the model to evaluate. If there is
    no model to evaluate, we may return False for exit because we want to stay
    alive for a while, waiting for all other models to finish. This gives us
    a chance to detect and pick up any possibly orphaned model by another
    worker.

    Parameters:
    ----------------------------------------------------------------------
    numModels:   number of models to generate
    retval:      (exit, models)
                    exit: true if this worker should exit.
                    models: list of tuples, one for each model. Each tuple contains:
                      (modelParams, modelParamsHash, particleHash)

                 modelParams is a dictionary containing the following elements:

                   structuredParams: dictionary containing all variables for
                     this model, with encoders represented as a dict within
                     this dict (or None if they are not included.

                   particleState: dictionary containing the state of this
                     particle. This includes the position and velocity of
                     each of its variables, the particleId, and the particle
                     generation index. It contains the following keys:

                     id: The particle Id of the particle we are using to
                           generate/track this model. This is a string of the
                           form <hypersearchWorkerId>.<particleIdx>
                     genIdx: the particle's generation index. This starts at 0
                           and increments every time we move the particle to a
                           new position.
                     swarmId: The swarmId, which is a string of the form
                       <encoder>.<encoder>... that describes this swarm
                     varStates: dict of the variable states. The key is the
                         variable name, the value is a dict of the variable's
                         position, velocity, bestPosition, bestResult, etc.
    """
    # Check for and mark orphaned models
    self._checkForOrphanedModels()

    modelResults = []
    for _ in xrange(numModels):
      candidateParticle = None

      # If we've reached the max # of model to evaluate, we're done.
      # (Error-ed models don't count against the limit.)
      if (self._maxModels is not None and
          (self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=
          self._maxModels):
        return (self._okToExit(), [])

      # If we don't already have a particle to work on, get a candidate swarm and
      # particle to work with. If None is returned for the particle it means
      # either that the search is over (if exitNow is also True) or that we need
      # to wait for other workers to finish up their models before we can pick
      # another particle to run (if exitNow is False).
      if candidateParticle is None:
        (exitNow, candidateParticle, candidateSwarm) = (
            self._getCandidateParticleAndSwarm())
      if candidateParticle is None:
        if exitNow:
          return (self._okToExit(), [])
        else:
          # Send an update status periodically to the JobTracker so that it doesn't
          # think this worker is dead.
          print >> sys.stderr, "reporter:status:In hypersearchV2: speculativeWait"
          # Randomized wait spreads retry load across workers.
          time.sleep(self._speculativeWaitSecondsMax * random.random())
          return (False, [])
      useEncoders = candidateSwarm.split('.')
      numAttempts = 0

      # Loop until we can create a unique model that we haven't seen yet.
      while True:

        # If this is the Nth attempt with the same candidate, agitate it a bit
        # to find a new unique position for it.
        if numAttempts >= 1:
          self.logger.debug("Agitating particle to get unique position after %d "
                  "failed attempts in a row" % (numAttempts))
          candidateParticle.agitate()

        # Create the hierarchical params expected by the base description. Note
        # that this is where we incorporate encoders that have no permuted
        # values in them.
        position = candidateParticle.getPosition()
        structuredParams = dict()
        def _buildStructuredParams(value, keys):
          flatKey = _flattenKeys(keys)
          # If it's an encoder, either put in None if it's not used, or replace
          # all permuted constructor params with the actual position.
          if flatKey in self._encoderNames:
            if flatKey in useEncoders:
              # Form encoder dict, substituting in chosen permutation values.
              return value.getDict(flatKey, position)
            # Encoder not used.
            else:
              return None
          # Regular top-level variable.
          elif flatKey in position:
            return position[flatKey]
          # Fixed override of a parameter in the base description.
          else:
            return value

        structuredParams = rCopy(self._permutations,
                                 _buildStructuredParams,
                                 discardNoneKeys=False)

        # Create the modelParams.
        modelParams = dict(
                   structuredParams=structuredParams,
                   particleState = candidateParticle.getState()
                   )

        # And the hashes. The params hash also folds in the base description
        # so models from different experiments can never collide.
        m = hashlib.md5()
        m.update(sortedJSONDumpS(structuredParams))
        m.update(self._baseDescriptionHash)
        paramsHash = m.digest()

        particleInst = "%s.%s" % (modelParams['particleState']['id'],
                                  modelParams['particleState']['genIdx'])
        particleHash = hashlib.md5(particleInst).digest()

        # Increase attempt counter
        numAttempts += 1

        # If this is a new one, and passes the filter test, exit with it.
        # TODO: There is currently a problem with this filters implementation as
        # it relates to self._maxUniqueModelAttempts. When there is a filter in
        # effect, we should try a lot more times before we decide we have
        # exhausted the parameter space for this swarm. The question is, how many
        # more times?
        if self._filterFunc and not self._filterFunc(structuredParams):
          valid = False
        else:
          valid = True
        if valid and self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:
          break

        # If we've exceeded the max allowed number of attempts, mark this swarm
        # as completing or completed, so we don't try and allocate any more new
        # particles to it, and pick another.
        if numAttempts >= self._maxUniqueModelAttempts:
          (exitNow, candidateParticle, candidateSwarm) \
                = self._getCandidateParticleAndSwarm(
                                              exhaustedSwarmId=candidateSwarm)
          if candidateParticle is None:
            if exitNow:
              return (self._okToExit(), [])
            else:
              time.sleep(self._speculativeWaitSecondsMax * random.random())
              return (False, [])
          numAttempts = 0
          useEncoders = candidateSwarm.split('.')

      # Log message
      if self.logger.getEffectiveLevel() <= logging.DEBUG:
        self.logger.debug("Submitting new potential model to HypersearchWorker: \n%s"
                          % (pprint.pformat(modelParams, indent=4)))
      modelResults.append((modelParams, paramsHash, particleHash))

    return (False, modelResults)
  def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,
                         completed, completionReason, matured, numRecords):
    """Record or update the results for a model. This is called by the
    HSW whenever it gets results info for another model, or updated results
    on a model that is still running.

    The first time this is called for a given modelID, the modelParams will
    contain the params dict for that model and the modelParamsHash will contain
    the hash of the params. Subsequent updates of the same modelID will
    have params and paramsHash values of None (in order to save overhead).

    The Hypersearch object should save these results into its own working
    memory into some table, which it then uses to determine what kind of
    new models to create next time createModels() is called.

    Parameters:
    ----------------------------------------------------------------------
    modelID:        ID of this model in models table
    modelParams:    params dict for this model, or None if this is just an update
                    of a model that it already previously reported on.

                    See the comments for the createModels() method for a
                    description of this dict.

    modelParamsHash:  hash of the modelParams dict, generated by the worker
                    that put it into the model database.
    results:        tuple containing (allMetrics, optimizeMetric). Each is a
                    dict containing metricName:result pairs. .
                    May be none if we have no results yet.
    completed:      True if the model has completed evaluation, False if it
                    is still running (and these are online results)
    completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
    matured:        True if this model has matured. In most cases, once a
                    model matures, it will complete as well. The only time a
                    model matures and does not complete is if it's currently
                    the best model and we choose to keep it running to generate
                    predictions.
    numRecords:     Number of records that have been processed so far by this
                    model.
    """
    # 'results' is (allMetrics, optimizeMetric); the optimize-metric dict
    # holds a single metricName:value pair, so grab its only value.
    if results is None:
      metricResult = None
    else:
      metricResult = results[1].values()[0]

    # Update our database. update() returns the model's error score.
    errScore = self._resultsDB.update(modelID=modelID,
                modelParams=modelParams,modelParamsHash=modelParamsHash,
                metricResult=metricResult, completed=completed,
                completionReason=completionReason, matured=matured,
                numRecords=numRecords)

    # Log message.
    self.logger.debug('Received progress on model %d: completed: %s, '
                      'cmpReason: %s, numRecords: %d, errScore: %s' ,
                      modelID, completed, completionReason, numRecords, errScore)

    # Log best so far.
    (bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
    self.logger.debug('Best err score seen so far: %s on model %s' % \
                     (bestResult, bestModelID))
  def runModel(self, modelID, jobID, modelParams, modelParamsHash,
               jobsDAO, modelCheckpointGUID):
    """Run the given model.

    This runs the model described by 'modelParams'. Periodically, it updates
    the results seen on the model to the model database using the databaseAO
    (database Access Object) methods.

    Parameters:
    -------------------------------------------------------------------------
    modelID:             ID of this model in models table
    jobID:               ID for this hypersearch job in the jobs table
    modelParams:         parameters of this specific model
                         modelParams is a dictionary containing the name/value
                         pairs of each variable we are permuting over. Note that
                         variables within an encoder spec have their name
                         structure as:
                           <encoderName>.<encoderVarName>
    modelParamsHash:     hash of modelParamValues
    jobsDAO:             jobs data access object - the interface to the jobs
                         database where model information is stored
    modelCheckpointGUID: A persistent, globally-unique identifier for
                         constructing the model checkpoint key
    """
    # We're going to make an assumption that if we're not using streams, that
    # we also don't need checkpoints saved. For now, this assumption is OK
    # (if there are no streams, we're typically running on a single machine
    # and just save models to files) but we may want to break this out as
    # a separate controllable parameter in the future
    if not self._createCheckpoints:
      modelCheckpointGUID = None

    # Register this model in our database
    self._resultsDB.update(modelID=modelID,
                           modelParams=modelParams,
                           modelParamsHash=modelParamsHash,
                           metricResult = None,
                           completed = False,
                           completionReason = None,
                           matured = False,
                           numRecords = 0)

    # Get the structured params, which we pass to the base description
    structuredParams = modelParams['structuredParams']

    if self.logger.getEffectiveLevel() <= logging.DEBUG:
      self.logger.debug("Running Model. \nmodelParams: %s, \nmodelID=%s, " % \
                        (pprint.pformat(modelParams, indent=4), modelID))

    # Record time.clock() so that we can report on cpu time
    cpuTimeStart = time.clock()

    # Run the experiment. This will report the results back to the models
    # database for us as well.
    logLevel = self.logger.getEffectiveLevel()
    try:
      if self._dummyModel is None or self._dummyModel is False:
        (cmpReason, cmpMsg) = runModelGivenBaseAndParams(
                    modelID=modelID,
                    jobID=jobID,
                    baseDescription=self._baseDescription,
                    params=structuredParams,
                    predictedField=self._predictedField,
                    reportKeys=self._reportKeys,
                    optimizeKey=self._optimizeKey,
                    jobsDAO=jobsDAO,
                    modelCheckpointGUID=modelCheckpointGUID,
                    logLevel=logLevel,
                    predictionCacheMaxRecords=self._predictionCacheMaxRecords)
      else:
        # Dummy-model path (used by tests/benchmarks to avoid running a real
        # experiment); merge the permutation params into the dummy config.
        dummyParams = dict(self._dummyModel)
        dummyParams['permutationParams'] = structuredParams
        if self._dummyModelParamsFunc is not None:
          permInfo = dict(structuredParams)
          permInfo ['generation'] = modelParams['particleState']['genIdx']
          dummyParams.update(self._dummyModelParamsFunc(permInfo))

        (cmpReason, cmpMsg) = runDummyModel(
                      modelID=modelID,
                      jobID=jobID,
                      params=dummyParams,
                      predictedField=self._predictedField,
                      reportKeys=self._reportKeys,
                      optimizeKey=self._optimizeKey,
                      jobsDAO=jobsDAO,
                      modelCheckpointGUID=modelCheckpointGUID,
                      logLevel=logLevel,
                      predictionCacheMaxRecords=self._predictionCacheMaxRecords)

      # Write out the completion reason and message
      jobsDAO.modelSetCompleted(modelID,
                            completionReason = cmpReason,
                            completionMsg = cmpMsg,
                            cpuTime = time.clock() - cpuTimeStart)

    except InvalidConnectionException, e:
      # Lost DB connectivity; log and swallow - presumably the model is later
      # re-adopted as an orphan by another worker (TODO confirm).
      self.logger.warn("%s", e)
| 107,865 | Python | .py | 2,067 | 42.063377 | 95 | 0.6419 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,019 | model_chooser.py | numenta_nupic-legacy/src/nupic/swarming/model_chooser.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import time
import logging
from nupic.support import initLogging
class ModelChooser(object):
  """Utility class to help with the selection of the 'best' model
  during hypersearch for a particular job.

  The main interface method is updateResultsForJob(), which is to
  be called periodically from the hypersearch worker.

  When called, the model chooser first tries to update the
  _eng_last_selection_sweep_time field in the jobs table. If it
  is successful, it then tries to find the model with the maximum
  metric.

  Note: although there are many model choosers for a given job, only 1
  will update the results, because only one chooser will be able to
  update the _eng_last_selection_sweep_time within a given interval.
  """

  # Minimum number of processed records a model needs before it is considered
  # during a periodic (non-forced) sweep.
  _MIN_UPDATE_THRESHOLD = 100

  # Minimum number of seconds between two consecutive selection sweeps.
  _MIN_UPDATE_INTERVAL = 5


  def __init__(self, jobID, jobsDAO, logLevel = None):
    """Instantiate a chooser for one hypersearch job.

    Parameters:
    -----------------------------------------------------------------------
    jobID:     ID of the job in the jobs table that we select models for
    jobsDAO:   ClientJobsDAO instance used to read/write job state
    logLevel:  optional override for this object's logger level
    """
    self._jobID = jobID
    self._cjDB = jobsDAO
    self._lastUpdateAttemptTime = 0
    initLogging(verbose = True)
    self.logger = logging.getLogger(".".join( ['com.numenta',
                       self.__class__.__module__, self.__class__.__name__]))
    if logLevel is not None:
      self.logger.setLevel(logLevel)

    self.logger.info("Created new ModelChooser for job %s" % str(jobID))


  def updateResultsForJob(self, forceUpdate=True):
    """ Chooses the best model for a given job.

    Parameters
    -----------------------------------------------------------------------
    forceUpdate:  (True/False). If True, the update will ignore all the
                  restrictions on the minimum time to update and the minimum
                  number of records to update. This should typically only be
                  set to true if the model has completed running
    """
    updateInterval = time.time() - self._lastUpdateAttemptTime
    if updateInterval < self._MIN_UPDATE_INTERVAL and not forceUpdate:
      return

    self.logger.info("Attempting model selection for jobID=%d: time=%f"\
                     " lastUpdate=%f"%(self._jobID,
                                       time.time(),
                                       self._lastUpdateAttemptTime))

    # Only one worker succeeds in bumping the sweep timestamp per interval;
    # the others back off unless they were told to force the update.
    timestampUpdated = self._cjDB.jobUpdateSelectionSweep(self._jobID,
                                                          self._MIN_UPDATE_INTERVAL)
    if not timestampUpdated:
      self.logger.info("Unable to update selection sweep timestamp: jobID=%d" \
                       " updateTime=%f"%(self._jobID, self._lastUpdateAttemptTime))
      if not forceUpdate:
        return

    self._lastUpdateAttemptTime = time.time()
    # NOTE: original message had a typo ("Succesfully").
    self.logger.info("Successfully updated selection sweep timestamp jobid=%d updateTime=%f"\
                     %(self._jobID, self._lastUpdateAttemptTime))

    # On a forced update (or a first-ever sweep) consider every model,
    # regardless of how few records it has processed so far.
    minUpdateRecords = self._MIN_UPDATE_THRESHOLD
    jobResults = self._getJobResults()
    if forceUpdate or jobResults is None:
      minUpdateRecords = 0

    candidateIDs, bestMetric = self._cjDB.modelsGetCandidates(self._jobID, minUpdateRecords)

    self.logger.info("Candidate models=%s, metric=%s, jobID=%s"\
                     %(candidateIDs, bestMetric, self._jobID))

    if len(candidateIDs) == 0:
      return

    self._jobUpdateCandidate(candidateIDs[0], bestMetric, results=jobResults)


  def _jobUpdateCandidate(self, candidateID, metricValue, results):
    """Write candidateID/metricValue into the job's JSON 'results' field.

    Parameters:
    -----------------------------------------------------------------------
    candidateID:  model ID of the newly selected best model
    metricValue:  the candidate's metric value (float)
    results:      the job's current raw JSON 'results' string, or None if no
                  results have been recorded yet
    """
    nullResults = results is None
    if nullResults:
      results = {'bestModel':None, 'bestValue':None}
    else:
      results = json.loads(results)
      self.logger.debug("Updating old results %s"%(results))

    oldCandidateID = results['bestModel']
    oldMetricValue = results['bestValue']

    results['bestModel'] = candidateID
    results['bestValue'] = metricValue
    # BUG FIX: the selection is "updated" only when the candidate DIFFERS
    # from the previous best. The original used == here, which swapped the
    # two log messages below.
    isUpdated = candidateID != oldCandidateID
    if isUpdated:
      self.logger.info("Choosing new model. Old candidate: (id=%s, value=%s)"\
                       " New candidate: (id=%s, value=%f)"%\
                       (oldCandidateID, oldMetricValue, candidateID, metricValue))
    else:
      self.logger.info("Same model as before. id=%s, "\
                       "metric=%f"%(candidateID, metricValue))

    self.logger.debug("New Results %s"%(results))
    self._cjDB.jobUpdateResults(self._jobID, json.dumps(results))


  def _getJobResults(self):
    """Return the job's raw 'results' field (a JSON string, or None if the
    job has no results yet). Raises RuntimeError if the job does not exist.
    """
    queryResults = self._cjDB.jobGetFields(self._jobID, ['results'])
    if len(queryResults) == 0:
      raise RuntimeError("Trying to update results for non-existent job")

    results = queryResults[0]
    return results
| 5,505 | Python | .py | 114 | 41.254386 | 92 | 0.659944 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,020 | experiment_utils.py | numenta_nupic-legacy/src/nupic/swarming/experiment_utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This file contains utility functions that are used
# internally by the prediction framework. It should not be
# imported by description files. (see helpers.py)
from nupic.support.enum import Enum
# TODO: This file contains duplicates of 'InferenceElement', 'InferenceType',
# and 'ModelResult' copied from nupic.frameworks.opf
# Will want to change this in the future!
class InferenceElement(Enum(
                prediction="prediction",
                encodings="encodings",
                classification="classification",
                anomalyScore="anomalyScore",
                anomalyLabel="anomalyLabel",
                classConfidences="classConfidences",
                multiStepPredictions="multiStepPredictions",
                multiStepBestPredictions="multiStepBestPredictions",
                multiStepBucketLikelihoods="multiStepBucketLikelihoods",
                multiStepBucketValues="multiStepBucketValues",
                )):
  """Enumeration of the element types that can appear in a ModelResult's
  'inferences' dict, plus static helpers describing how each element relates
  temporally to the sensor input.
  """

  # Maps an inference element to the name of the sensor-input attribute that
  # serves as its input/ground truth (used for metrics and prediction
  # logging). Elements not listed here have no direct input counterpart.
  __inferenceInputMap = {
    "prediction":               "dataRow",
    "encodings":                "dataEncodings",
    "classification":           "category",
    "classConfidences":         "category",
    "multiStepPredictions":     "dataDict",
    "multiStepBestPredictions": "dataDict",
  }

  # Lazily-built set of elements whose inference predicts the NEXT
  # timestep's input; populated on first call to isTemporal().
  __temporalInferenceElements = None

  @staticmethod
  def getInputElement(inferenceElement):
    """ Get the sensor input element that corresponds to the given inference
    element. This is mainly used for metrics and prediction logging.

    Returns None when the element has no input counterpart.
    """
    return InferenceElement.__inferenceInputMap.get(inferenceElement, None)

  @staticmethod
  def isTemporal(inferenceElement):
    """ Returns True if the inference from this timestep is predicted the input
    for the NEXT timestep.

    NOTE: This should only be checked IF THE MODEL'S INFERENCE TYPE IS ALSO
    TEMPORAL. That is, a temporal model CAN have non-temporal inference elements,
    but a non-temporal model CANNOT have temporal inference elements
    """
    if InferenceElement.__temporalInferenceElements is None:
      InferenceElement.__temporalInferenceElements = \
                                set([InferenceElement.prediction])

    return inferenceElement in InferenceElement.__temporalInferenceElements

  @staticmethod
  def getTemporalDelay(inferenceElement, key=None):
    """ Returns the number of records that elapse between when an inference is
    made and when the corresponding input record will appear. For example, a
    multistep prediction for 3 timesteps out will have a delay of 3

    Parameters:
    -----------------------------------------------------------------------
    inferenceElement: The InferenceElement value being delayed
    key:              If the inference is a dictionary type, this specifies
                      key for the sub-inference that is being delayed
    """
    # -----------------------------------------------------------------------
    # For next step prediction, we shift by 1
    if inferenceElement in (InferenceElement.prediction,
                            InferenceElement.encodings):
      return 1
    # -----------------------------------------------------------------------
    # For classification, anomaly scores, the inferences immediately succeed the
    # inputs
    if inferenceElement in (InferenceElement.anomalyScore,
                            InferenceElement.anomalyLabel,
                            InferenceElement.classification,
                            InferenceElement.classConfidences):
      return 0
    # -----------------------------------------------------------------------
    # For multistep prediction, the delay is based on the key in the inference
    # dictionary (the key is the number of steps out)
    if inferenceElement in (InferenceElement.multiStepPredictions,
                            InferenceElement.multiStepBestPredictions):
      return int(key)
    # -----------------------------------------------------------------------
    # default: return 0
    return 0

  @staticmethod
  def getMaxDelay(inferences):
    """
    Returns the maximum delay for the InferenceElements in the inference
    dictionary

    Parameters:
    -----------------------------------------------------------------------
    inferences:   A dictionary where the keys are InferenceElements
    """
    maxDelay = 0
    for inferenceElement, inference in inferences.iteritems():
      if isinstance(inference, dict):
        # Dict-valued inferences (e.g. multistep) can carry a different delay
        # per sub-key.
        for key in inference.iterkeys():
          maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,
                                                           key),
                         maxDelay)
      else:
        maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
                       maxDelay)

    return maxDelay
class InferenceType(Enum("TemporalNextStep",
                         "TemporalClassification",
                         "NontemporalClassification",
                         "TemporalAnomaly",
                         "NontemporalAnomaly",
                         "TemporalMultiStep",
                         "NontemporalMultiStep")):
  """Enumeration of the high-level inference types a model can run. The
  'temporal' variants require a temporal memory in the network; use
  isTemporal() to test for them.
  """

  # Cache of the temporal members; built lazily on the first isTemporal()
  # call so it can reference the enum's own values.
  __temporalInferenceTypes = None

  @staticmethod
  def isTemporal(inferenceType):
    """ Returns True if the inference type is 'temporal', i.e. requires a
    temporal memory in the network.
    """
    if InferenceType.__temporalInferenceTypes is None:
      temporalMembers = frozenset([InferenceType.TemporalNextStep,
                                   InferenceType.TemporalClassification,
                                   InferenceType.TemporalAnomaly,
                                   InferenceType.TemporalMultiStep,
                                   InferenceType.NontemporalMultiStep])
      InferenceType.__temporalInferenceTypes = temporalMembers

    return inferenceType in InferenceType.__temporalInferenceTypes
# ModelResult - A structure that contains the input to a model and the resulting
# predictions as well as any related information related to the predictions.
#
# predictionNumber: The prediction number. This should start at 0 and increase
# with each new ModelResult.
#
# rawInput: The input record, as input by the user. This is a dictionary-like
# object which has attributes whose names are the same as the input
# field names
#
# sensorInput: A SensorInput object that represents the input record, as it
# appears right before it is encoded. This may differ from the raw
# input in that certain input fields (such as DateTime fields) may
# be split into multiple encoded fields
#
# inferences: A dictionary of inferences. Each key is a InferenceType constant
# which corresponds to the type of prediction being made. Each value
# is a ___ element that corresponds to the actual prediction by the
# model, including auxillary information; TODO: fix description.
#
# metrics: The metrics corresponding to the most-recent prediction/ground
# truth pair
class ModelResult(object):
  """Plain data holder for one timestep's worth of model output: the raw and
  encoded input record, the inference dict, the latest metrics, and the
  identity of the predicted field. All attributes default to None.
  """

  # __slots__ keeps instances lightweight; one is created per record.
  __slots__= ("predictionNumber", "rawInput", "sensorInput", "inferences",
              "metrics", "predictedFieldIdx", "predictedFieldName")

  def __init__(self,
               predictionNumber=None,
               rawInput=None,
               sensorInput=None,
               inferences=None,
               metrics=None,
               predictedFieldIdx=None,
               predictedFieldName=None):
    # Sequential prediction counter (starts at 0 for the first record).
    self.predictionNumber = predictionNumber
    # Input record exactly as supplied by the user.
    self.rawInput = rawInput
    # The record as it appears just before encoding (SensorInput).
    self.sensorInput = sensorInput
    # Dict keyed by inference element type.
    self.inferences = inferences
    # Metrics for the most recent prediction/ground-truth pair.
    self.metrics = metrics
    # Index and name of the field being predicted.
    self.predictedFieldIdx = predictedFieldIdx
    self.predictedFieldName = predictedFieldName

  def __repr__(self):
    fieldValues = (self.predictionNumber,
                   self.rawInput,
                   self.sensorInput,
                   self.inferences,
                   self.metrics,
                   self.predictedFieldIdx,
                   self.predictedFieldName)
    template = ("ModelResult("
                "\tpredictionNumber={0}\n"
                "\trawInput={1}\n"
                "\tsensorInput={2}\n"
                "\tinferences={3}\n"
                "\tmetrics={4}\n"
                "\tpredictedFieldIdx={5}\n"
                "\tpredictedFieldName={6}\n"
                ")")
    return template.format(*fieldValues)
26,021 | permutations_runner.py | numenta_nupic-legacy/src/nupic/swarming/permutations_runner.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file permutations_runner.py
"""
import collections
import imp
import csv
from datetime import datetime, timedelta
import cPickle as pickle
import time
import subprocess
from nupic.swarming.hypersearch import object_json as json
import nupic.database.client_jobs_dao as cjdao
from nupic.swarming import hypersearch_worker
from nupic.swarming.hypersearch_v2 import HypersearchV2
from nupic.swarming.exp_generator.experiment_generator import expGenerator
from nupic.swarming.utils import *
# Module-wide verbosity level used by _emit()/_verbosityEnabled(); one of the
# Verbosity.* constants (0 = warnings only).
g_currentVerbosityLevel = 0

# The currently-running search runner; saved so the SIGTERM/SIGINT handler
# (_termHandler) can cancel its job on shutdown.
gCurrentSearch = None

# Default value for every recognized swarm option; callers typically copy
# this dict and override a subset of the keys.
DEFAULT_OPTIONS = {"overwrite": False,
                  "expDescJsonPath": None,
                  "expDescConfig": None,
                  "permutationsScriptPath": None,
                  "outputLabel": "swarm_out",
                  "outDir": None,
                  "permWorkDir": None,
                  "action": "run",
                  "searchMethod": "v2",
                  "timeout": None,
                  "exports": None,
                  "useTerminators": False,
                  "maxWorkers": 2,
                  "replaceReport": False,
                  "maxPermutations": None,
                  "genTopNDescriptions": 1}
class Verbosity(object):
  """ @private
  Symbolic verbosity levels for progress messages; a message is printed when
  its level is <= the module-wide g_currentVerbosityLevel (see _emit()).
  """
  # Lowest to highest chattiness.
  WARNING = 0
  INFO = 1
  DEBUG = 2
def _termHandler(signal, frame):
  """SIGTERM/SIGINT handler: cancels the currently running hypersearch job.

  NOTE: the first parameter shadows the `signal` module, but the body never
  uses it, so the shadowing is harmless here.
  """
  try:
    jobrunner = gCurrentSearch
    # Reach the private __searchJob attribute via its mangled name since we
    # are outside the _HyperSearchRunner class.
    jobID = jobrunner._HyperSearchRunner__searchJob.getJobID()
  except Exception as exc:
    print exc
  else:
    print "Canceling jobs due to receiving SIGTERM"
    cjdao.ClientJobsDAO.get().jobCancel(jobID)
def _setupInterruptHandling():
  """Install _termHandler for SIGTERM and SIGINT so an interrupted swarm
  cancels its ClientJobs job before the process exits.
  """
  for signum in (signal.SIGTERM, signal.SIGINT):
    signal.signal(signum, _termHandler)
def _verbosityEnabled(verbosityLevel):
  """Return True when a message at `verbosityLevel` should be emitted, i.e.
  when the module-wide g_currentVerbosityLevel is at least that high.
  """
  return g_currentVerbosityLevel >= verbosityLevel
def _emit(verbosityLevel, info):
  """Print `info` if the current global verbosity admits `verbosityLevel`."""
  if _verbosityEnabled(verbosityLevel):
    print info
def _escape(s):
"""Escape commas, tabs, newlines and dashes in a string
Commas are encoded as tabs
"""
assert isinstance(s, str), \
"expected %s but got %s; value=%s" % (type(str), type(s), s)
s = s.replace("\\", "\\\\")
s = s.replace("\n", "\\n")
s = s.replace("\t", "\\t")
s = s.replace(",", "\t")
return s
def _engineServicesRunning():
  """ Return true if the engine services are running

  Scans `ps aux` output for a python process running the
  clientjobmanager.client_job_manager module. Raises RuntimeError when the
  process listing itself cannot be obtained.
  """
  process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE)
  stdout = process.communicate()[0]
  if process.returncode != 0:
    raise RuntimeError("Unable to check for running client job manager")
  # See if the CJM is running anywhere in the listing.
  return any(
      "python" in line and "clientjobmanager.client_job_manager" in line
      for line in stdout.split("\n"))
def _runHyperSearch(runOptions):
  """Run (or resume) a hypersearch job to completion and print its report.

  Parameters:
  ----------------------------------------------------------------------
  runOptions:     NupicRunPermutations options dict
  retval:         model parameters of the best model (from generateReport)
  """
  global gCurrentSearch
  # Run HyperSearch
  startTime = time.time()
  search = _HyperSearchRunner(runOptions)
  # Save in global for the signal handler.
  gCurrentSearch = search
  if runOptions["action"] in ("run", "dryRun"):
    search.runNewSearch()
  else:
    search.pickupSearch()
  # Generate reports
  # Print results and generate report csv file
  modelParams = _HyperSearchRunner.generateReport(
    options=runOptions,
    replaceReport=runOptions["replaceReport"],
    hyperSearchJob=search.peekSearchJob(),
    metricsKeys=search.getDiscoveredMetricsKeys())
  secs = time.time() - startTime
  # NOTE: Python 2 integer division intentionally truncates here.
  hours = int(secs) / (60 * 60)
  secs -= hours * (60 * 60)
  minutes = int(secs) / 60
  secs -= minutes * 60
  print "Elapsed time (h:mm:ss): %d:%02d:%02d" % (hours, minutes, int(secs))
  jobID = search.peekSearchJob().getJobID()
  print "Hypersearch ClientJobs job ID: ", jobID
  return modelParams
def _injectDefaultOptions(options):
  """Return a new dict: DEFAULT_OPTIONS overridden by the caller's options."""
  merged = dict(DEFAULT_OPTIONS)
  merged.update(options)
  return merged
def _validateOptions(options):
if "expDescJsonPath" not in options \
and "expDescConfig" not in options \
and "permutationsScriptPath" not in options:
raise Exception("Options must contain one of the following: "
"expDescJsonPath, expDescConfig, or "
"permutationsScriptPath.")
def _generateExpFilesFromSwarmDescription(swarmDescriptionJson, outDir):
  """Generate description.py / permutations.py in `outDir` from a swarm
  description dictionary, via expGenerator.
  """
  # The expGenerator expects the JSON without newlines for an unknown reason.
  expDescConfig = "".join(json.dumps(swarmDescriptionJson).splitlines())
  expGenerator([
    "--description=%s" % (expDescConfig),
    "--outDir=%s" % (outDir)])
def _runAction(runOptions):
  """Dispatch on runOptions["action"]: print a report for the last run, or
  run/resume a hypersearch. Creates the output and working directories if
  needed. Returns the action's result (report value or model params).
  """
  for dirKey in ("outDir", "permWorkDir"):
    if not os.path.exists(runOptions[dirKey]):
      os.makedirs(runOptions[dirKey])
  action = runOptions["action"]
  # Print Nupic HyperSearch results from the current or last run
  if action == "report":
    return _HyperSearchRunner.generateReport(
        options=runOptions,
        replaceReport=runOptions["replaceReport"],
        hyperSearchJob=None,
        metricsKeys=None)
  # Run HyperSearch
  if action in ("run", "dryRun", "pickup"):
    return _runHyperSearch(runOptions)
  raise Exception("Unhandled action: %s" % action)
def _checkOverwrite(options, outDir):
overwrite = options["overwrite"]
if not overwrite:
for name in ("description.py", "permutations.py"):
if os.path.exists(os.path.join(outDir, name)):
raise RuntimeError("The %s file already exists and will be "
"overwritten by this tool. If it is OK to overwrite "
"this file, use the --overwrite option." % \
os.path.join(outDir, "description.py"))
# The overwrite option has already been used, so should be removed from the
# config at this point.
del options["overwrite"]
def runWithConfig(swarmConfig, options,
                  outDir=None, outputLabel="default",
                  permWorkDir=None, verbosity=1):
  """
  Starts a swarm, given a dictionary configuration.
  @param swarmConfig {dict} A complete [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description) object.
  @param options {dict} Swarm options dict; NOTE: mutated by this call (the
                 "overwrite" key is consumed and run settings are injected).
  @param outDir {string} Optional path to write swarm details (defaults to
                         current working directory).
  @param outputLabel {string} Optional label for output (defaults to "default").
  @param permWorkDir {string} Optional location of working directory (defaults
                              to current working directory).
  @param verbosity {int} Optional (1,2,3) increasing verbosity of output.

  @returns {object} Model parameters
  """
  global g_currentVerbosityLevel
  g_currentVerbosityLevel = verbosity
  # Generate the description and permutations.py files in the same directory
  # for reference.
  if outDir is None:
    outDir = os.getcwd()
  if permWorkDir is None:
    permWorkDir = os.getcwd()
  _checkOverwrite(options, outDir)
  _generateExpFilesFromSwarmDescription(swarmConfig, outDir)
  options["expDescConfig"] = swarmConfig
  options["outputLabel"] = outputLabel
  options["outDir"] = outDir
  options["permWorkDir"] = permWorkDir
  runOptions = _injectDefaultOptions(options)
  _validateOptions(runOptions)
  return _runAction(runOptions)
def runWithJsonFile(expJsonFilePath, options, outputLabel, permWorkDir):
  """
  Starts a swarm, given a path to a JSON file containing configuration.

  This function is meant to be used with a CLI wrapper that passes command line
  arguments in through the options parameter.

  @param expJsonFilePath {string} Path to a JSON file containing the complete
                                 [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description).
  @param options {dict} CLI options.
  @param outputLabel {string} Label for output.
  @param permWorkDir {string} Location of working directory.

  @returns {object} Model parameters — the value of runWithConfig()
                    (the original docstring's "{int} Swarm job id" was wrong).
  """
  if "verbosityCount" in options:
    verbosity = options["verbosityCount"]
    del options["verbosityCount"]
  else:
    verbosity = 1
  _setupInterruptHandling()
  with open(expJsonFilePath, "r") as jsonFile:
    expJsonConfig = json.loads(jsonFile.read())
  # Generated files are placed next to the JSON description.
  outDir = os.path.dirname(expJsonFilePath)
  return runWithConfig(expJsonConfig, options, outDir=outDir,
                       outputLabel=outputLabel, permWorkDir=permWorkDir,
                       verbosity=verbosity)
def runWithPermutationsScript(permutationsFilePath, options,
                                outputLabel, permWorkDir):
  """
  Starts a swarm, given a path to a permutations.py script.

  This function is meant to be used with a CLI wrapper that passes command line
  arguments in through the options parameter.

  @param permutationsFilePath {string} Path to permutations.py.
  @param options {dict} CLI options; NOTE: mutated by this call (run settings
                 are injected before dispatch).
  @param outputLabel {string} Label for output.
  @param permWorkDir {string} Location of working directory.

  @returns {object} Model parameters.
  """
  global g_currentVerbosityLevel
  if "verbosityCount" in options:
    g_currentVerbosityLevel = options["verbosityCount"]
    del options["verbosityCount"]
  else:
    g_currentVerbosityLevel = 1
  _setupInterruptHandling()
  options["permutationsScriptPath"] = permutationsFilePath
  options["outputLabel"] = outputLabel
  # For a permutations script, output goes into the working directory.
  options["outDir"] = permWorkDir
  options["permWorkDir"] = permWorkDir
  # Assume it's a permutations python script
  runOptions = _injectDefaultOptions(options)
  _validateOptions(runOptions)
  return _runAction(runOptions)
def runPermutations(_):
  """
  DEPRECATED. Use @ref runWithConfig.
  """
  deprecationMessage = (
      "nupic.swarming.permutations_runner.runPermutations() is no longer "
      "implemented. It has been replaced with a simpler function for library "
      "usage: nupic.swarming.permutations_runner.runWithConfig(). See docs "
      "at http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html"
      "programmatically for details.")
  raise DeprecationWarning(deprecationMessage)
def _setUpExports(exports):
ret = ""
if exports is None:
return ret
exportDict = json.loads(exports)
for key in exportDict.keys():
if (sys.platform.startswith('win')):
ret+= "set \"%s=%s\" & " % (str(key), str(exportDict[key]))
else:
ret+= "export %s=%s;" % (str(key), str(exportDict[key]))
return ret
def _clientJobsDB():
  """
  Returns: The shared cjdao.ClientJobsDAO instance
  """
  return cjdao.ClientJobsDAO.get()
def _nupicHyperSearchHasErrors(hyperSearchJob):
  """Check whether any experiments failed in our latest hypersearch

  Parameters:
    hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved
                    jobID, if any

  Returns: False if all models succeeded, True if one or more had errors

  NOTE: currently a stub that always reports success; the parameter is
  unused until the TODO below is implemented.
  """
  # TODO flesh me out
  # Get search ID for our latest hypersearch
  # Query Nupic for experiment failures in the given search
  return False
class _HyperSearchRunner(object):
  """ @private
  Manages one instance of HyperSearch"""

  def __init__(self, options):
    """
    Parameters:
    ----------------------------------------------------------------------
    options:        NupicRunPermutations options dict
    retval:         nothing
    """
    self.__cjDAO = _clientJobsDB()
    self._options = options
    # _HyperSearchJob instance set up by runNewSearch() and pickupSearch()
    self.__searchJob = None
    # Metrics keys seen so far across finished models.  NOTE: the attribute
    # name carries a historical typo ("Metrcs"); it is kept because other
    # methods reference it through the same (mangled) name.
    self.__foundMetrcsKeySet = set()
    # If we are instead relying on the engine to launch workers for us, this
    # will stay as None, otherwise it becomes an array of subprocess Popen
    # instances.
    self._workers = None
    return
  def runNewSearch(self):
    """Start a new hypersearch job and monitor it to completion
    Parameters:
    ----------------------------------------------------------------------
    retval:         nothing
    """
    self.__searchJob = self.__startSearch()
    self.monitorSearchJob()
  def pickupSearch(self):
    """Pick up the latest search from a saved jobID and monitor it to completion
    Parameters:
    ----------------------------------------------------------------------
    retval:         nothing
    """
    # The jobID was pickled by a previous run (see __saveHyperSearchJobID).
    self.__searchJob = self.loadSavedHyperSearchJob(
      permWorkDir=self._options["permWorkDir"],
      outputLabel=self._options["outputLabel"])
    self.monitorSearchJob()
  def monitorSearchJob(self):
    """Poll the search job until it finishes, printing progress updates
    (completion stats, worker state, job results, model milestones and
    engine status) whenever they change.

    Parameters:
    ----------------------------------------------------------------------
    retval:         nothing
    """
    assert self.__searchJob is not None
    jobID = self.__searchJob.getJobID()
    startTime = time.time()
    lastUpdateTime = datetime.now()
    # Monitor HyperSearch and report progress
    # NOTE: may be -1 if it can't be determined
    expectedNumModels = self.__searchJob.getExpectedNumModels(
                                searchMethod = self._options["searchMethod"])
    lastNumFinished = 0
    finishedModelIDs = set()
    finishedModelStats = _ModelStats()
    # Keep track of the worker state, results, and milestones from the job
    # record
    lastWorkerState = None
    lastJobResults = None
    lastModelMilestones = None
    lastEngStatus = None
    hyperSearchFinished = False
    while not hyperSearchFinished:
      jobInfo = self.__searchJob.getJobStatus(self._workers)
      # Check for job completion BEFORE processing models; NOTE: this permits us
      # to process any models that we may not have accounted for in the
      # previous iteration.
      hyperSearchFinished = jobInfo.isFinished()
      # Look for newly completed models, and process them
      modelIDs = self.__searchJob.queryModelIDs()
      _emit(Verbosity.DEBUG,
            "Current number of models is %d (%d of them completed)" % (
              len(modelIDs), len(finishedModelIDs)))
      if len(modelIDs) > 0:
        # Build a list of modelIDs to check for completion
        checkModelIDs = []
        for modelID in modelIDs:
          if modelID not in finishedModelIDs:
            checkModelIDs.append(modelID)
        del modelIDs
        # Process newly completed models
        if checkModelIDs:
          _emit(Verbosity.DEBUG,
                "Checking %d models..." % (len(checkModelIDs)))
          errorCompletionMsg = None
          for (i, modelInfo) in enumerate(_iterModels(checkModelIDs)):
            _emit(Verbosity.DEBUG,
                  "[%s] Checking completion: %s" % (i, modelInfo))
            if modelInfo.isFinished():
              finishedModelIDs.add(modelInfo.getModelID())
              finishedModelStats.update(modelInfo)
              # Remember the first error message of this batch for display.
              if (modelInfo.getCompletionReason().isError() and
                  not errorCompletionMsg):
                errorCompletionMsg = modelInfo.getCompletionMsg()
              # Update the set of all encountered metrics keys (we will use
              # these to print column names in reports.csv)
              metrics = modelInfo.getReportMetrics()
              self.__foundMetrcsKeySet.update(metrics.keys())
        numFinished = len(finishedModelIDs)
        # Print current completion stats
        if numFinished != lastNumFinished:
          lastNumFinished = numFinished
          if expectedNumModels is None:
            expModelsStr = ""
          else:
            expModelsStr = "of %s" % (expectedNumModels)
          stats = finishedModelStats
          print ("<jobID: %s> %s %s models finished [success: %s; %s: %s; %s: "
                 "%s; %s: %s; %s: %s; %s: %s; %s: %s]" % (
                     jobID,
                     numFinished,
                     expModelsStr,
                     #stats.numCompletedSuccess,
                     (stats.numCompletedEOF+stats.numCompletedStopped),
                     "EOF" if stats.numCompletedEOF else "eof",
                     stats.numCompletedEOF,
                     "STOPPED" if stats.numCompletedStopped else "stopped",
                     stats.numCompletedStopped,
                     "KILLED" if stats.numCompletedKilled else "killed",
                     stats.numCompletedKilled,
                     "ERROR" if stats.numCompletedError else "error",
                     stats.numCompletedError,
                     # NOTE(review): this condition tests numCompletedError
                     # rather than numCompletedOrphaned — looks like a
                     # copy/paste slip affecting only label casing; the count
                     # printed below is the correct orphaned tally.
                     "ORPHANED" if stats.numCompletedError else "orphaned",
                     stats.numCompletedOrphaned,
                     "UNKNOWN" if stats.numCompletedOther else "unknown",
                     stats.numCompletedOther))
          # Print the first error message from the latest batch of completed
          # models
          if errorCompletionMsg:
            print "ERROR MESSAGE: %s" % errorCompletionMsg
      # Print the new worker state, if it changed
      workerState = jobInfo.getWorkerState()
      if workerState != lastWorkerState:
        print "##>> UPDATED WORKER STATE: \n%s" % (pprint.pformat(workerState,
                                                                  indent=4))
        lastWorkerState = workerState
      # Print the new job results, if it changed
      jobResults = jobInfo.getResults()
      if jobResults != lastJobResults:
        print "####>> UPDATED JOB RESULTS: \n%s (elapsed time: %g secs)" \
          % (pprint.pformat(jobResults, indent=4), time.time()-startTime)
        lastJobResults = jobResults
      # Print the new model milestones if they changed
      modelMilestones = jobInfo.getModelMilestones()
      if modelMilestones != lastModelMilestones:
        print "##>> UPDATED MODEL MILESTONES: \n%s" % (
          pprint.pformat(modelMilestones, indent=4))
        lastModelMilestones = modelMilestones
      # Print the new engine status if it changed
      engStatus = jobInfo.getEngStatus()
      if engStatus != lastEngStatus:
        print "##>> UPDATED STATUS: \n%s" % (engStatus)
        lastEngStatus = engStatus
      # Sleep before next check
      if not hyperSearchFinished:
        # Cancel the job and bail out if the caller-supplied timeout
        # (in minutes) has elapsed.
        if self._options["timeout"] != None:
          if ((datetime.now() - lastUpdateTime) >
              timedelta(minutes=self._options["timeout"])):
            print "Timeout reached, exiting"
            self.__cjDAO.jobCancel(jobID)
            sys.exit(1)
        time.sleep(1)
    # Tabulate results
    modelIDs = self.__searchJob.queryModelIDs()
    print "Evaluated %s models" % len(modelIDs)
    print "HyperSearch finished!"
    jobInfo = self.__searchJob.getJobStatus(self._workers)
    print "Worker completion message: %s" % (jobInfo.getWorkerCompletionMsg())
  def _launchWorkers(self, cmdLine, numWorkers):
    """ Launch worker processes to execute the given command line

    Parameters:
    -----------------------------------------------
    cmdLine: The command line for each worker
    numWorkers: number of workers to launch
    """
    self._workers = []
    for i in range(numWorkers):
      # Capture each worker's output in named temp files (delete=False so
      # they survive for post-mortem inspection); the handles are attached
      # to the Popen object so they live as long as the worker entry does.
      stdout = tempfile.NamedTemporaryFile(delete=False)
      stderr = tempfile.NamedTemporaryFile(delete=False)
      p = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True,
                           stdin=None, stdout=stdout, stderr=stderr)
      p._stderr_file = stderr
      p._stdout_file = stdout
      self._workers.append(p)
  def __startSearch(self):
    """Starts HyperSearch as a worker or runs it inline for the "dryRun" action

    Parameters:
    ----------------------------------------------------------------------
    retval:         the new _HyperSearchJob instance representing the
                    HyperSearch job
    """
    # This search uses a pre-existing permutations script
    params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,
                                                     forRunning=True)
    if self._options["action"] == "dryRun":
      # Run the hypersearch worker inline in this process.
      args = [sys.argv[0], "--params=%s" % (json.dumps(params))]
      print
      print "=================================================================="
      print "RUNNING PERMUTATIONS INLINE as \"DRY RUN\"..."
      print "=================================================================="
      jobID = hypersearch_worker.main(args)
    else:
      cmdLine = _setUpExports(self._options["exports"])
      # Begin the new search. The {JOBID} string is replaced by the actual
      # jobID returned from jobInsert.
      cmdLine += "$HYPERSEARCH"
      maxWorkers = self._options["maxWorkers"]
      jobID = self.__cjDAO.jobInsert(
        client="GRP",
        cmdLine=cmdLine,
        params=json.dumps(params),
        minimumWorkers=1,
        maximumWorkers=maxWorkers,
        jobType=self.__cjDAO.JOB_TYPE_HS)
      # Launch our own worker subprocesses pointed at the new job.
      cmdLine = "python -m nupic.swarming.hypersearch_worker" \
                " --jobID=%d" % (jobID)
      self._launchWorkers(cmdLine, maxWorkers)
    searchJob = _HyperSearchJob(jobID)
    # Save search ID to file (this is used for report generation)
    self.__saveHyperSearchJobID(
      permWorkDir=self._options["permWorkDir"],
      outputLabel=self._options["outputLabel"],
      hyperSearchJob=searchJob)
    if self._options["action"] == "dryRun":
      print "Successfully executed \"dry-run\" hypersearch, jobID=%d" % (jobID)
    else:
      print "Successfully submitted new HyperSearch job, jobID=%d" % (jobID)
      _emit(Verbosity.DEBUG,
            "Each worker executing the command line: %s" % (cmdLine,))
    return searchJob
  def peekSearchJob(self):
    """Retrieves the runner's _HyperSearchJob instance; NOTE: only available
    after run().

    Parameters:
    ----------------------------------------------------------------------
    retval:         _HyperSearchJob instance or None
    """
    assert self.__searchJob is not None
    return self.__searchJob
  def getDiscoveredMetricsKeys(self):
    """Returns a tuple of all metrics keys discovered while running HyperSearch.

    NOTE: This is an optimization so that our client may
    use this info for generating the report csv file without having
    to pre-scan all modelInfos

    Parameters:
    ----------------------------------------------------------------------
    retval:         Tuple of metrics keys discovered while running
                    HyperSearch;
    """
    return tuple(self.__foundMetrcsKeySet)
  @classmethod
  def printModels(cls, options):
    """Prints a listing of experiments that would take place without
    actually executing them.

    Parameters:
    ----------------------------------------------------------------------
    options:        NupicRunPermutations options dict
    retval:         nothing
    """
    print "Generating experiment requests..."
    # NOTE(review): the returned params dict is discarded here; presumably
    # the listing is printed as a side effect of makeSearchJobParamsDict —
    # verify against _ClientJobUtils.
    searchParams = _ClientJobUtils.makeSearchJobParamsDict(options=options)
  @classmethod
  def generateReport(cls,
                     options,
                     replaceReport,
                     hyperSearchJob,
                     metricsKeys):
    """Prints all available results in the given HyperSearch job and emits
    model information to the permutations report csv.

    The job may be completed or still in progress.

    Parameters:
    ----------------------------------------------------------------------
    options:        NupicRunPermutations options dict
    replaceReport:  True to replace existing report csv, if any; False to
                    append to existing report csv, if any
    hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved
                    jobID, if any
    metricsKeys:    sequence of report metrics key names to include in report;
                    if None, will pre-scan all modelInfos to generate a complete
                    list of metrics key names.
    retval:         model parameters
    """
    # Load _HyperSearchJob instance from storage, if not provided
    if hyperSearchJob is None:
      hyperSearchJob = cls.loadSavedHyperSearchJob(
        permWorkDir=options["permWorkDir"],
        outputLabel=options["outputLabel"])
    modelIDs = hyperSearchJob.queryModelIDs()
    bestModel = None
    # If metricsKeys was not provided, pre-scan modelInfos to create the list;
    # this is needed by _ReportCSVWriter
    # Also scan the parameters to generate a list of encoders and search
    # parameters
    metricstmp = set()
    searchVar = set()
    for modelInfo in _iterModels(modelIDs):
      if modelInfo.isFinished():
        vars = modelInfo.getParamLabels().keys()
        searchVar.update(vars)
        metrics = modelInfo.getReportMetrics()
        metricstmp.update(metrics.keys())
    if metricsKeys is None:
      metricsKeys = metricstmp
    # Create a csv report writer
    reportWriter = _ReportCSVWriter(hyperSearchJob=hyperSearchJob,
                                    metricsKeys=metricsKeys,
                                    searchVar=searchVar,
                                    outputDirAbsPath=options["permWorkDir"],
                                    outputLabel=options["outputLabel"],
                                    replaceReport=replaceReport)
    # Tallies of experiment dispositions
    modelStats = _ModelStats()
    #numCompletedOther = long(0)
    print "\nResults from all experiments:"
    print "----------------------------------------------------------------"
    # Get common optimization metric info from permutations script
    searchParams = hyperSearchJob.getParams()
    (optimizationMetricKey, maximizeMetric) = (
      _PermutationUtils.getOptimizationMetricInfo(searchParams))
    # Print metrics, while looking for the best model
    formatStr = None
    # NOTE: we may find additional metrics if HyperSearch is still running
    foundMetricsKeySet = set(metricsKeys)
    sortedMetricsKeys = []
    # pull out best Model from jobs table
    jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID())
    # Try to return a decent error message if the job was cancelled for some
    # reason.
    if jobInfo.cancel == 1:
      raise Exception(jobInfo.workerCompletionMsg)
    try:
      results = json.loads(jobInfo.results)
    except Exception, e:
      print "json.loads(jobInfo.results) raised an exception.  " \
            "Here is some info to help with debugging:"
      print "jobInfo: ", jobInfo
      print "jobInfo.results: ", jobInfo.results
      print "EXCEPTION: ", e
      raise
    bestModelNum = results["bestModel"]
    bestModelIterIndex = None
    # performance metrics for the entire job
    totalWallTime = 0
    totalRecords = 0
    # At the end, we will sort the models by their score on the optimization
    # metric
    scoreModelIDDescList = []
    for (i, modelInfo) in enumerate(_iterModels(modelIDs)):
      # Output model info to report csv
      reportWriter.emit(modelInfo)
      # Update job metrics
      totalRecords+=modelInfo.getNumRecords()
      format = "%Y-%m-%d %H:%M:%S"
      startTime = modelInfo.getStartTime()
      if modelInfo.isFinished():
        endTime = modelInfo.getEndTime()
        st = datetime.strptime(startTime, format)
        et = datetime.strptime(endTime, format)
        totalWallTime+=(et-st).seconds
      # Tabulate experiment dispositions
      modelStats.update(modelInfo)
      # For convenience
      expDesc = modelInfo.getModelDescription()
      reportMetrics = modelInfo.getReportMetrics()
      optimizationMetrics = modelInfo.getOptimizationMetrics()
      if modelInfo.getModelID() == bestModelNum:
        bestModel = modelInfo
        bestModelIterIndex=i
        # NOTE(review): bestMetric is assigned but never read afterwards.
        bestMetric = optimizationMetrics.values()[0]
      # Keep track of the best-performing model
      if optimizationMetrics:
        assert len(optimizationMetrics) == 1, (
          "expected 1 opt key, but got %d (%s) in %s" % (
            len(optimizationMetrics), optimizationMetrics, modelInfo))
      # Append to our list of modelIDs and scores
      if modelInfo.getCompletionReason().isEOF():
        scoreModelIDDescList.append((optimizationMetrics.values()[0],
                                     modelInfo.getModelID(),
                                     modelInfo.getGeneratedDescriptionFile(),
                                     modelInfo.getParamLabels()))
      print "[%d] Experiment %s\n(%s):" % (i, modelInfo, expDesc)
      if (modelInfo.isFinished() and
          not (modelInfo.getCompletionReason().isStopped or
               modelInfo.getCompletionReason().isEOF())):
        print ">> COMPLETION MESSAGE: %s" % modelInfo.getCompletionMsg()
      if reportMetrics:
        # Update our metrics key set and format string
        foundMetricsKeySet.update(reportMetrics.iterkeys())
        if len(sortedMetricsKeys) != len(foundMetricsKeySet):
          sortedMetricsKeys = sorted(foundMetricsKeySet)
          maxKeyLen = max([len(k) for k in sortedMetricsKeys])
          formatStr = "  %%-%ds" % (maxKeyLen+2)
        # Print metrics
        for key in sortedMetricsKeys:
          if key in reportMetrics:
            if key == optimizationMetricKey:
              m = "%r (*)" % reportMetrics[key]
            else:
              m = "%r" % reportMetrics[key]
            print formatStr % (key+":"), m
        print
    # Summarize results
    print "--------------------------------------------------------------"
    if len(modelIDs) > 0:
      print "%d experiments total (%s).\n" % (
        len(modelIDs),
        ("all completed successfully"
         if (modelStats.numCompletedKilled + modelStats.numCompletedEOF) ==
             len(modelIDs)
         else "WARNING: %d models have not completed or there were errors" % (
           len(modelIDs) - (
             modelStats.numCompletedKilled + modelStats.numCompletedEOF +
             modelStats.numCompletedStopped))))
      if modelStats.numStatusOther > 0:
        print "ERROR: models with unexpected status: %d" % (
          modelStats.numStatusOther)
      print "WaitingToStart: %d" % modelStats.numStatusWaitingToStart
      print "Running: %d" % modelStats.numStatusRunning
      print "Completed: %d" % modelStats.numStatusCompleted
      if modelStats.numCompletedOther > 0:
        print "    ERROR: models with unexpected completion reason: %d" % (
          modelStats.numCompletedOther)
      print "    ran to EOF: %d" % modelStats.numCompletedEOF
      print "    ran to stop signal: %d" % modelStats.numCompletedStopped
      print "    were orphaned: %d" % modelStats.numCompletedOrphaned
      print "    killed off: %d" % modelStats.numCompletedKilled
      print "    failed: %d" % modelStats.numCompletedError
      assert modelStats.numStatusOther == 0, "numStatusOther=%s" % (
        modelStats.numStatusOther)
      assert modelStats.numCompletedOther == 0, "numCompletedOther=%s" % (
        modelStats.numCompletedOther)
    else:
      print "0 experiments total."
    # Print out the field contributions
    print
    global gCurrentSearch
    jobStatus = hyperSearchJob.getJobStatus(gCurrentSearch._workers)
    jobResults = jobStatus.getResults()
    if "fieldContributions" in jobResults:
      print "Field Contributions:"
      pprint.pprint(jobResults["fieldContributions"], indent=4)
    else:
      print "Field contributions info not available"
    # Did we have an optimize key?
    if bestModel is not None:
      maxKeyLen = max([len(k) for k in sortedMetricsKeys])
      maxKeyLen = max(maxKeyLen, len(optimizationMetricKey))
      formatStr = "  %%-%ds" % (maxKeyLen+2)
      bestMetricValue = bestModel.getOptimizationMetrics().values()[0]
      optimizationMetricName = bestModel.getOptimizationMetrics().keys()[0]
      print
      print "Best results on the optimization metric %s (maximize=%s):" % (
        optimizationMetricName, maximizeMetric)
      print "[%d] Experiment %s (%s):" % (
        bestModelIterIndex, bestModel, bestModel.getModelDescription())
      print formatStr % (optimizationMetricName+":"), bestMetricValue
      print
      print "Total number of Records processed: %d" % totalRecords
      print
      print "Total wall time for all models: %d" % totalWallTime
      hsJobParams = hyperSearchJob.getParams()
    # Were we asked to write out the top N model description files?
    if options["genTopNDescriptions"] > 0:
      print "\nGenerating description files for top %d models..." % (
        options["genTopNDescriptions"])
      # NOTE(review): ascending sort takes the N lowest scores as "top" —
      # appropriate for minimized (error) metrics; confirm behavior for
      # maximize-metric searches.
      scoreModelIDDescList.sort()
      scoreModelIDDescList = scoreModelIDDescList[
        0:options["genTopNDescriptions"]]
      i = -1
      for (score, modelID, description, paramLabels) in scoreModelIDDescList:
        i += 1
        outDir = os.path.join(options["permWorkDir"], "model_%d" % (i))
        print "Generating description file for model %s at %s" % \
          (modelID, outDir)
        if not os.path.exists(outDir):
          os.makedirs(outDir)
        # Fix up the location to the base description file.
        # importBaseDescription() chooses the file relative to the calling file.
        # The calling file is in outDir.
        # The base description is in the user-specified "outDir"
        base_description_path = os.path.join(options["outDir"],
          "description.py")
        base_description_relpath = os.path.relpath(base_description_path,
                                                   start=outDir)
        description = description.replace(
          "importBaseDescription('base.py', config)",
          "importBaseDescription('%s', config)" % base_description_relpath)
        fd = open(os.path.join(outDir, "description.py"), "wb")
        fd.write(description)
        fd.close()
        # Generate a csv file with the parameter settings in it
        fd = open(os.path.join(outDir, "params.csv"), "wb")
        writer = csv.writer(fd)
        colNames = paramLabels.keys()
        colNames.sort()
        writer.writerow(colNames)
        row = [paramLabels[x] for x in colNames]
        writer.writerow(row)
        fd.close()
        print "Generating model params file..."
        # Generate a model params file alongside the description.py
        mod = imp.load_source("description", os.path.join(outDir,
                                                          "description.py"))
        model_description = mod.descriptionInterface.getModelDescription()
        fd = open(os.path.join(outDir, "model_params.py"), "wb")
        fd.write("%s\nMODEL_PARAMS = %s" % (getCopyrightHead(),
          pprint.pformat(model_description)))
        fd.close()
    print
    reportWriter.finalize()
    # NOTE(review): model_description is only bound inside the loop above;
    # with genTopNDescriptions == 0 (or no EOF-completed models) this return
    # raises NameError/UnboundLocalError — confirm callers always request
    # at least one description.
    return model_description
@classmethod
def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel):
"""Instantiates a _HyperSearchJob instance from info saved in file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: _HyperSearchJob instance; raises exception if not found
"""
jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir,
outputLabel=outputLabel)
searchJob = _HyperSearchJob(nupicJobID=jobID)
return searchJob
@classmethod
def __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob):
"""Saves the given _HyperSearchJob instance's jobID to file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
hyperSearchJob: _HyperSearchJob instance
retval: nothing
"""
jobID = hyperSearchJob.getJobID()
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
if os.path.exists(filePath):
_backupFile(filePath)
d = dict(hyperSearchJobID = jobID)
with open(filePath, "wb") as jobIdPickleFile:
pickle.dump(d, jobIdPickleFile)
@classmethod
def __loadHyperSearchJobID(cls, permWorkDir, outputLabel):
"""Loads a saved jobID from file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: HyperSearch jobID; raises exception if not found.
"""
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
jobID = None
with open(filePath, "r") as jobIdPickleFile:
jobInfo = pickle.load(jobIdPickleFile)
jobID = jobInfo["hyperSearchJobID"]
return jobID
@classmethod
def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
"""Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID
"""
# Get the base path and figure out the path of the report file.
basePath = permWorkDir
# Form the name of the output csv file that will contain all the results
filename = "%s_HyperSearchJobID.pkl" % (outputLabel,)
filepath = os.path.join(basePath, filename)
return filepath
class _ModelStats(object):
""" @private
"""
def __init__(self):
# Tallies of experiment dispositions
self.numStatusWaitingToStart = long(0)
self.numStatusRunning = long(0)
self.numStatusCompleted = long(0)
self.numStatusOther = long(0)
#self.numCompletedSuccess = long(0)
self.numCompletedKilled = long(0)
self.numCompletedError = long(0)
self.numCompletedStopped = long(0)
self.numCompletedEOF = long(0)
self.numCompletedOther = long(0)
self.numCompletedOrphaned = long(0)
def update(self, modelInfo):
# Tabulate experiment dispositions
if modelInfo.isWaitingToStart():
self.numStatusWaitingToStart += 1
elif modelInfo.isRunning():
self.numStatusRunning += 1
elif modelInfo.isFinished():
self.numStatusCompleted += 1
reason = modelInfo.getCompletionReason()
# if reason.isSuccess():
# self.numCompletedSuccess += 1
if reason.isEOF():
self.numCompletedEOF += 1
elif reason.isKilled():
self.numCompletedKilled += 1
elif reason.isStopped():
self.numCompletedStopped += 1
elif reason.isError():
self.numCompletedError += 1
elif reason.isOrphaned():
self.numCompletedOrphaned += 1
else:
self.numCompletedOther += 1
else:
self.numStatusOther += 1
class _ReportCSVWriter(object):
  """ @private
  Emits per-model result rows to the permutations report CSV file.
  """

  # Class-level accumulator of model wall time across emits.
  __totalModelTime = timedelta()

  def __init__(self,
               hyperSearchJob,
               metricsKeys,
               searchVar,
               outputDirAbsPath,
               outputLabel,
               replaceReport):
    """
    Parameters:
    ----------------------------------------------------------------------
    hyperSearchJob: _HyperSearchJob instance
    metricsKeys:    sequence of report metrics key names to include in report
    searchVar:      sequence of search-variable (parameter label) names to
                    include as report columns
    outputDirAbsPath:
                    Directory for creating report CSV file (absolute path)
    outputLabel:    A string label to incorporate into report CSV file name
    replaceReport:  True to replace existing report csv, if any; False to
                    append to existing report csv, if any
    retval:         nothing
    """
    self.__searchJob = hyperSearchJob
    self.__searchJobID = hyperSearchJob.getJobID()
    self.__sortedMetricsKeys = sorted(metricsKeys)
    self.__outputDirAbsPath = os.path.abspath(outputDirAbsPath)
    self.__outputLabel = outputLabel
    self.__replaceReport = replaceReport
    # NOTE: despite the name, the raw searchVar sequence is stored here;
    # it is not sorted at this point.
    self.__sortedVariableNames=searchVar
    # These are set up by __openAndInitCSVFile
    self.__csvFileObj = None
    self.__reportCSVPath = None
    self.__backupCSVPath = None
def emit(self, modelInfo):
"""Emit model info to csv file
Parameters:
----------------------------------------------------------------------
modelInfo: _NupicModelInfo instance
retval: nothing
"""
# Open/init csv file, if needed
if self.__csvFileObj is None:
# sets up self.__sortedVariableNames and self.__csvFileObj
self.__openAndInitCSVFile(modelInfo)
csv = self.__csvFileObj
# Emit model info row to report.csv
print >> csv, "%s, " % (self.__searchJobID),
print >> csv, "%s, " % (modelInfo.getModelID()),
print >> csv, "%s, " % (modelInfo.statusAsString()),
if modelInfo.isFinished():
print >> csv, "%s, " % (modelInfo.getCompletionReason()),
else:
print >> csv, "NA, ",
if not modelInfo.isWaitingToStart():
print >> csv, "%s, " % (modelInfo.getStartTime()),
else:
print >> csv, "NA, ",
if modelInfo.isFinished():
dateFormat = "%Y-%m-%d %H:%M:%S"
startTime = modelInfo.getStartTime()
endTime = modelInfo.getEndTime()
print >> csv, "%s, " % endTime,
st = datetime.strptime(startTime, dateFormat)
et = datetime.strptime(endTime, dateFormat)
print >> csv, "%s, " % (str((et - st).seconds)),
else:
print >> csv, "NA, ",
print >> csv, "NA, ",
print >> csv, "%s, " % str(modelInfo.getModelDescription()),
print >> csv, "%s, " % str(modelInfo.getNumRecords()),
paramLabelsDict = modelInfo.getParamLabels()
for key in self.__sortedVariableNames:
# Some values are complex structures,.. which need to be represented as
# strings
if key in paramLabelsDict:
print >> csv, "%s, " % (paramLabelsDict[key]),
else:
print >> csv, "None, ",
metrics = modelInfo.getReportMetrics()
for key in self.__sortedMetricsKeys:
value = metrics.get(key, "NA")
value = str(value)
value = value.replace("\n", " ")
print >> csv, "%s, " % (value),
print >> csv
def finalize(self):
"""Close file and print report/backup csv file paths
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
if self.__csvFileObj is not None:
# Done with file
self.__csvFileObj.close()
self.__csvFileObj = None
print "Report csv saved in %s" % (self.__reportCSVPath,)
if self.__backupCSVPath:
print "Previous report csv file was backed up to %s" % \
(self.__backupCSVPath,)
else:
print "Nothing was written to report csv file."
def __openAndInitCSVFile(self, modelInfo):
"""
- Backs up old report csv file;
- opens the report csv file in append or overwrite mode (per
self.__replaceReport);
- emits column fields;
- sets up self.__sortedVariableNames, self.__csvFileObj,
self.__backupCSVPath, and self.__reportCSVPath
Parameters:
----------------------------------------------------------------------
modelInfo: First _NupicModelInfo instance passed to emit()
retval: nothing
"""
# Get the base path and figure out the path of the report file.
basePath = self.__outputDirAbsPath
# Form the name of the output csv file that will contain all the results
reportCSVName = "%s_Report.csv" % (self.__outputLabel,)
reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)
# If a report CSV file already exists, back it up
backupCSVPath = None
if os.path.exists(reportCSVPath):
backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)
# Open report file
if self.__replaceReport:
mode = "w"
else:
mode = "a"
csv = self.__csvFileObj = open(reportCSVPath, mode)
# If we are appending, add some blank line separators
if not self.__replaceReport and backupCSVPath:
print >> csv
print >> csv
# Print the column names
print >> csv, "jobID, ",
print >> csv, "modelID, ",
print >> csv, "status, " ,
print >> csv, "completionReason, ",
print >> csv, "startTime, ",
print >> csv, "endTime, ",
print >> csv, "runtime(s), " ,
print >> csv, "expDesc, ",
print >> csv, "numRecords, ",
for key in self.__sortedVariableNames:
print >> csv, "%s, " % key,
for key in self.__sortedMetricsKeys:
print >> csv, "%s, " % key,
print >> csv
class _NupicJob(object):
  """ @private
  Our Nupic Job abstraction"""


  def __init__(self, nupicJobID):
    """_NupicJob constructor

    Parameters:
    ----------------------------------------------------------------------
    nupicJobID:  Nupic Client JobID of the job
    retval:      nothing
    """
    self.__nupicJobID = nupicJobID

    jobInfo = _clientJobsDB().jobInfo(nupicJobID)
    assert jobInfo is not None, "jobID=%s not found" % nupicJobID
    assert jobInfo.jobId == nupicJobID, "%s != %s" % (jobInfo.jobId, nupicJobID)
    _emit(Verbosity.DEBUG, "_NupicJob: \n%s" % pprint.pformat(jobInfo, indent=4))

    # The job's params column is JSON (or NULL); decode it once up front
    if jobInfo.params is not None:
      self.__params = json.loads(jobInfo.params)
    else:
      self.__params = None


  def __repr__(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         representation of this _NupicJob instance
    """
    return "%s(jobID=%s)" % (self.__class__.__name__, self.__nupicJobID)


  def getJobStatus(self, workers):
    """
    Parameters:
    ----------------------------------------------------------------------
    workers:  If this job was launched outside of the nupic job engine, then this
              is an array of subprocess Popen instances, one for each worker
    retval:         _NupicJob.JobStatus instance
    """
    jobInfo = self.JobStatus(self.__nupicJobID, workers)
    return jobInfo


  def getJobID(self):
    """Semi-private method for retrieving the jobId

    Parameters:
    ----------------------------------------------------------------------
    retval:         Nupic Client JobID of this _NupicJob instance
    """
    return self.__nupicJobID


  def getParams(self):
    """Semi-private method for retrieving the job-specific params

    Parameters:
    ----------------------------------------------------------------------
    retval:         Job params dict corresponding to the JSON params value
                    returned by ClientJobsDAO.jobInfo()
    """
    return self.__params


  class JobStatus(object):
    """ @private
    Our Nupic Job Info abstraction class"""

    # Job Status values (per client_jobs_dao.py):
    __nupicJobStatus_NotStarted  = cjdao.ClientJobsDAO.STATUS_NOTSTARTED
    __nupicJobStatus_Starting    = cjdao.ClientJobsDAO.STATUS_STARTING
    __nupicJobStatus_running     = cjdao.ClientJobsDAO.STATUS_RUNNING
    __nupicJobStatus_completed   = cjdao.ClientJobsDAO.STATUS_COMPLETED

    def __init__(self, nupicJobID, workers):
      """_NupicJob.JobStatus Constructor

      Parameters:
      ----------------------------------------------------------------------
      nupicJobID: Nupic ClientJob ID
      workers:  If this job was launched outside of the Nupic job engine, then
                this is an array of subprocess Popen instances, one for each
                worker
      retval: nothing
      """
      jobInfo = _clientJobsDB().jobInfo(nupicJobID)
      assert jobInfo.jobId == nupicJobID, "%s != %s" % (jobInfo.jobId, nupicJobID)

      # If we launched the workers ourself, set the job status based on the
      # workers that are still running
      if workers is not None:
        runningCount = 0
        for worker in workers:
          retCode = worker.poll()
          if retCode is None:
            runningCount += 1
        if runningCount > 0:
          status = cjdao.ClientJobsDAO.STATUS_RUNNING
        else:
          status = cjdao.ClientJobsDAO.STATUS_COMPLETED
          # BUGFIX: the original code referenced the loop variables after the
          # for-loop ended, which (a) inspected only the *last* worker's exit
          # code and (b) raised NameError when the workers list was empty.
          # Report the stderr of every failed worker instead.
          for worker in workers:
            if worker.poll() != 0:
              with open(worker._stderr_file.name, 'r') as err:
                _emit(Verbosity.WARNING,
                      "Job %d failed with error: %s" % (jobInfo.jobId,
                                                        err.read()))

        jobInfo = jobInfo._replace(status=status)

      _emit(Verbosity.DEBUG, "JobStatus: \n%s" % pprint.pformat(jobInfo,
                                                                indent=4))

      self.__jobInfo = jobInfo


    def __repr__(self):
      return "%s(jobId=%s, status=%s, completionReason=%s, " \
             "startTime=%s, endTime=%s)" % (
                self.__class__.__name__, self.__jobInfo.jobId,
                self.statusAsString(), self.__jobInfo.completionReason,
                self.__jobInfo.startTime, self.__jobInfo.endTime)


    def statusAsString(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:       Job status as a human-readable string
      """
      return self.__jobInfo.status


    def isWaitingToStart(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:         True if the job has not been started yet
      """
      waiting = (self.__jobInfo.status == self.__nupicJobStatus_NotStarted)
      return waiting


    def isStarting(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:         True if the job is starting
      """
      starting = (self.__jobInfo.status == self.__nupicJobStatus_Starting)
      return starting


    def isRunning(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:         True if the job is running
      """
      running = (self.__jobInfo.status == self.__nupicJobStatus_running)
      return running


    def isFinished(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:         True if the job has finished (either with success or
                      failure)
      """
      done = (self.__jobInfo.status == self.__nupicJobStatus_completed)
      return done


    def getCompletionReason(self):
      """Returns _JobCompletionReason.

      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:         _JobCompletionReason instance
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return _JobCompletionReason(self.__jobInfo.completionReason)


    def getCompletionMsg(self):
      """Returns job completion message.

      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:         completion message
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.completionMsg


    def getWorkerCompletionMsg(self):
      """Returns the worker generated completion message.

      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:         completion message
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.workerCompletionMsg


    def getStartTime(self):
      """Returns job start time.

      NOTE: it's an error to call this method if isWaitingToStart() would
      return True.

      Parameters:
      ----------------------------------------------------------------------
      retval:         job processing start time
      """
      assert not self.isWaitingToStart(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.startTime


    def getEndTime(self):
      """Returns job end time.

      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:         job processing end time
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.endTime


    def getWorkerState(self):
      """Returns the worker state field.

      Parameters:
      ----------------------------------------------------------------------
      retval:         worker state field as a dict
      """
      if self.__jobInfo.engWorkerState is not None:
        return json.loads(self.__jobInfo.engWorkerState)
      else:
        return None


    def getResults(self):
      """Returns the results field.

      Parameters:
      ----------------------------------------------------------------------
      retval:         job results field as a dict
      """
      if self.__jobInfo.results is not None:
        return json.loads(self.__jobInfo.results)
      else:
        return None


    def getModelMilestones(self):
      """Returns the model milestones field.

      Parameters:
      ----------------------------------------------------------------------
      retval:         model milestones as a dict
      """
      if self.__jobInfo.engModelMilestones is not None:
        return json.loads(self.__jobInfo.engModelMilestones)
      else:
        return None


    def getEngStatus(self):
      """Returns the engine status field - used for progress messages

      Parameters:
      ----------------------------------------------------------------------
      retval:         engine status field as string
      """
      return self.__jobInfo.engStatus
class _JobCompletionReason(object):
  """ @private
  Represents completion reason for Client Jobs and Models"""

  def __init__(self, reason):
    """
    Parameters:
    ----------------------------------------------------------------------
    reason:   completion reason value from ClientJobsDAO.jobInfo()
    """
    self.__reason = reason

  def __str__(self):
    return "%s" % self.__reason

  def __repr__(self):
    return "%s(reason=%s)" % (self.__class__.__name__, self.__reason)

  def __matches(self, expectedReason):
    # All the is*() predicates reduce to an equality test against one of the
    # ClientJobsDAO CMPL_REASON_* constants.
    return self.__reason == expectedReason

  def isEOF(self):
    return self.__matches(cjdao.ClientJobsDAO.CMPL_REASON_EOF)

  def isSuccess(self):
    return self.__matches(cjdao.ClientJobsDAO.CMPL_REASON_SUCCESS)

  def isStopped(self):
    return self.__matches(cjdao.ClientJobsDAO.CMPL_REASON_STOPPED)

  def isKilled(self):
    return self.__matches(cjdao.ClientJobsDAO.CMPL_REASON_KILLED)

  def isOrphaned(self):
    return self.__matches(cjdao.ClientJobsDAO.CMPL_REASON_ORPHAN)

  def isError(self):
    return self.__matches(cjdao.ClientJobsDAO.CMPL_REASON_ERROR)
class _HyperSearchJob(_NupicJob):
  """ @private
  This class represents a single running Nupic HyperSearch job"""


  def __init__(self, nupicJobID):
    """
    Parameters:
    ----------------------------------------------------------------------
    nupicJobID:      Nupic Client JobID of a HyperSearch job
    retval:          nothing
    """
    super(_HyperSearchJob, self).__init__(nupicJobID)

    # Cache of the total count of expected models or -1 if it can't be
    # deteremined.
    #
    # Set by getExpectedNumModels()
    #
    # TODO: update code to handle non-ronomatic search algorithms
    self.__expectedNumModels = None


  def queryModelIDs(self):
    """Queries DB for model IDs of all currently instantiated models
    associated with this HyperSearch job.

    See also: _iterModels()

    Parameters:
    ----------------------------------------------------------------------
    retval:          A sequence of Nupic modelIDs
    """
    jobID = self.getJobID()
    # Each pair is (modelID, updateCounter); only the IDs are needed here
    modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
    modelIDs = tuple(x[0] for x in modelCounterPairs)

    return modelIDs


  def getExpectedNumModels(self, searchMethod):
    """Returns: the total number of expected models if known, -1 if it can't
    be determined.

    NOTE: this can take a LONG time to complete for HyperSearches with a huge
          number of possible permutations.

    Parameters:
    ----------------------------------------------------------------------
    searchMethod:    "v2" is the only method currently supported
    retval:          The total number of expected models, if known; -1 if
                     unknown
    """
    # NOTE(review): searchMethod is unused, and __expectedNumModels is
    # initialized to None and never assigned anywhere in this class, so this
    # currently always returns None (not -1) -- confirm callers treat None
    # as "unknown".
    return self.__expectedNumModels
class _ClientJobUtils(object):
  """ @private
  Our Nupic Client Job utilities"""


  @classmethod
  def makeSearchJobParamsDict(cls, options, forRunning=False):
    """Constructs a dictionary of HyperSearch parameters suitable for converting
    to json and passing as the params argument to ClientJobsDAO.jobInsert()

    Parameters:
    ----------------------------------------------------------------------
    options:        NupicRunPermutations options dict
    forRunning:     True if the params are for running a Hypersearch job; False
                    if params are for introspection only.

    retval:         A dictionary of HyperSearch parameters for
                    ClientJobsDAO.jobInsert()
    """
    # Only the "v2" search method is supported.
    if options["searchMethod"] != "v2":
      raise Exception("Unsupported search method: %r" % options["searchMethod"])
    hsVersion = "v2"

    # A dry run evaluates a single model unless an explicit limit was given.
    maxModels = options["maxPermutations"]
    if maxModels is None and options["action"] == "dryRun":
      maxModels = 1

    params = {
      "hsVersion": hsVersion,
      "maxModels": maxModels,
    }

    # Only pass useTerminators through when the caller specified it explicitly;
    # otherwise let the hypersearch engine apply its default.
    useTerminators = options["useTerminators"]
    if useTerminators is not None:
      params["useTerminators"] = useTerminators

    if forRunning:
      params["persistentJobGUID"] = str(uuid.uuid1())

    # The experiment description comes from exactly one of three sources,
    # in this order of precedence: a permutations script, an inline
    # description config, or a JSON description file.
    if options["permutationsScriptPath"]:
      params["permutationsPyFilename"] = options["permutationsScriptPath"]
    elif options["expDescConfig"]:
      params["description"] = options["expDescConfig"]
    else:
      with open(options["expDescJsonPath"], mode="r") as fp:
        params["description"] = json.load(fp)

    return params
class _PermutationUtils(object):
  """ @private
  Utilities for running permutations"""


  @classmethod
  def getOptimizationMetricInfo(cls, searchJobParams):
    """Retrieves the optimization key name and optimization function.

    Parameters:
    ---------------------------------------------------------
    searchJobParams:
                    Parameter for passing as the searchParams arg to
                    Hypersearch constructor.
    retval:       (optimizationMetricKey, maximize)
                   optimizationMetricKey: which report key to optimize for
                   maximize: True if we should try and maximize the optimizeKey
                     metric. False if we should minimize it.
    """
    # Only the v2 hypersearch implementation is supported; the Hypersearch
    # object itself knows which metric it optimizes and in which direction.
    if searchJobParams["hsVersion"] == "v2":
      search = HypersearchV2(searchParams=searchJobParams)
    else:
      raise RuntimeError("Unsupported hypersearch version \"%s\"" % \
                         (searchJobParams["hsVersion"]))

    info = search.getOptimizationMetricInfo()
    return info
def _backupFile(filePath):
  """Back up a file by copying it to "<name>.<N><ext>", where N is the first
  integer stamp (starting at 0) for which no backup file exists yet.

  Parameters:
  ----------------------------------------------------------------------
  filePath:  path of the file to back up; must already exist
  retval:    Filepath of the back-up
  """
  assert os.path.exists(filePath)

  (prefix, suffix) = os.path.splitext(filePath)

  # Probe stamp numbers 0, 1, 2, ... until an unused backup name is found.
  stampNum = 0
  backupPath = "%s.%d%s" % (prefix, stampNum, suffix)
  while os.path.exists(backupPath):
    stampNum += 1
    backupPath = "%s.%d%s" % (prefix, stampNum, suffix)

  shutil.copyfile(filePath, backupPath)

  return backupPath
def _getOneModelInfo(nupicModelID):
  """A convenience function that retrieves information about a single model

  See also: _iterModels()

  Parameters:
  ----------------------------------------------------------------------
  nupicModelID:      Nupic modelID
  retval:            _NupicModelInfo instance for the given nupicModelID.
  """
  # Delegate to the batch iterator and pull out its first (and only) element.
  modelInfoIterator = _iterModels([nupicModelID])
  return modelInfoIterator.next()
def _iterModels(modelIDs):
  """Creates an iterator that returns ModelInfo elements for the given modelIDs

  WARNING:      The order of ModelInfo elements returned by the iterator
                may not match the order of the given modelIDs

  Parameters:
  ----------------------------------------------------------------------
  modelIDs:     A sequence of model identifiers (e.g., as returned by
                _HyperSearchJob.queryModelIDs()).
  retval:       Iterator that returns ModelInfo elements for the given
                modelIDs (NOTE: possibly in a different order)
  """

  class ModelInfoIterator(object):
    """ModelInfo iterator implementation class.

    Fetches model info from the database in batches of up to __CACHE_LIMIT
    IDs at a time, serving individual _NupicModelInfo instances from an
    in-memory deque until it empties, then refilling.
    """

    # Maximum number of ModelInfo elements to load into cache whenever
    # cache empties
    __CACHE_LIMIT = 1000

    debug=False


    def __init__(self, modelIDs):
      """
      Parameters:
      ----------------------------------------------------------------------
      modelIDs:     a sequence of Nupic model identifiers for which this
                    iterator will return _NupicModelInfo instances.
                    NOTE: The returned instances are NOT guaranteed to be in
                    the same order as the IDs in modelIDs sequence.
      retval:       nothing
      """
      # Make our own copy in case caller changes model id list during iteration
      self.__modelIDs = tuple(modelIDs)

      if self.debug:
        _emit(Verbosity.DEBUG,
              "MODELITERATOR: __init__; numModelIDs=%s" % len(self.__modelIDs))

      # Index into __modelIDs of the next batch to fetch
      self.__nextIndex = 0

      # Deque of _NupicModelInfo instances not yet handed out
      self.__modelCache = collections.deque()
      return


    def __iter__(self):
      """Iterator Protocol function

      Parameters:
      ----------------------------------------------------------------------
      retval:         self
      """
      return self


    def next(self):
      """Iterator Protocol function (Python 2 style)

      Parameters:
      ----------------------------------------------------------------------
      retval:       A _NupicModelInfo instance or raises StopIteration to
                    signal end of iteration.
      """
      return self.__getNext()


    def __getNext(self):
      """Implementation of the next() Iterator Protocol function.

      When the modelInfo cache becomes empty, queries Nupic and fills the cache
      with the next set of NupicModelInfo instances.

      Parameters:
      ----------------------------------------------------------------------
      retval:       A _NupicModelInfo instance or raises StopIteration to
                    signal end of iteration.
      """

      if self.debug:
        _emit(Verbosity.DEBUG,
              "MODELITERATOR: __getNext(); modelCacheLen=%s" % (
                  len(self.__modelCache)))

      if not self.__modelCache:
        self.__fillCache()

      # A still-empty cache after a refill attempt means the IDs are exhausted
      if not self.__modelCache:
        raise StopIteration()

      return self.__modelCache.popleft()


    def __fillCache(self):
      """Queries Nupic and fills an empty modelInfo cache with the next set of
      _NupicModelInfo instances

      Parameters:
      ----------------------------------------------------------------------
      retval:       nothing
      """
      assert (not self.__modelCache)

      # Assemble a list of model IDs to look up
      numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0

      if self.__nextIndex >= numModelIDs:
        return

      # Clamp the batch to the remaining IDs
      idRange = self.__nextIndex + self.__CACHE_LIMIT
      if idRange > numModelIDs:
        idRange = numModelIDs

      lookupIDs = self.__modelIDs[self.__nextIndex:idRange]

      self.__nextIndex += (idRange - self.__nextIndex)

      # Query Nupic for model info of all models in the look-up list
      # NOTE: the order of results may not be the same as lookupIDs
      infoList = _clientJobsDB().modelsInfo(lookupIDs)
      assert len(infoList) == len(lookupIDs), \
            "modelsInfo returned %s elements; expected %s." % \
            (len(infoList), len(lookupIDs))

      # Create _NupicModelInfo instances and add them to cache
      for rawInfo in infoList:
        modelInfo = _NupicModelInfo(rawInfo=rawInfo)
        self.__modelCache.append(modelInfo)

      assert len(self.__modelCache) == len(lookupIDs), \
            "Added %s elements to modelCache; expected %s." % \
            (len(self.__modelCache), len(lookupIDs))

      if self.debug:
        _emit(Verbosity.DEBUG,
              "MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s" % \
              (len(self.__modelCache),))

  return ModelInfoIterator(modelIDs)
class _NupicModelInfo(object):
  """ @private
  This class represents information obtained from ClientJobManager about a
  model
  """

  # Model status values (per client_jobs_dao.py)
  __nupicModelStatus_notStarted  = cjdao.ClientJobsDAO.STATUS_NOTSTARTED
  __nupicModelStatus_running     = cjdao.ClientJobsDAO.STATUS_RUNNING
  __nupicModelStatus_completed   = cjdao.ClientJobsDAO.STATUS_COMPLETED
  __rawInfo = None


  def __init__(self, rawInfo):
    """
    Parameters:
    ----------------------------------------------------------------------
    rawInfo:        A single model information element as returned by
                    ClientJobsDAO.modelsInfo()
    retval:         nothing.
    """
    self.__rawInfo = rawInfo

    # Cached model metrics (see __unwrapResults())
    self.__cachedResults = None

    assert self.__rawInfo.params is not None

    # Cached model params (see __unwrapParams())
    self.__cachedParams = None


  def __repr__(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Representation of this _NupicModelInfo instance.
    """
    return ("%s(jobID=%s, modelID=%s, status=%s, completionReason=%s, "
            "updateCounter=%s, numRecords=%s)" % (
                "_NupicModelInfo",
                self.__rawInfo.jobId,
                self.__rawInfo.modelId,
                self.__rawInfo.status,
                self.__rawInfo.completionReason,
                self.__rawInfo.updateCounter,
                self.__rawInfo.numRecords))


  def getModelID(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Nupic modelID associated with this model info.
    """
    return self.__rawInfo.modelId


  def statusAsString(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Human-readable string representation of the model's status.
    """
    return "%s" % self.__rawInfo.status


  def getModelDescription(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Printable description of the model.
    """
    params = self.__unwrapParams()

    if "experimentName" in params:
      return params["experimentName"]

    else:
      paramSettings = self.getParamLabels()
      # Form a csv friendly string representation of this model
      items = []
      for key, value in paramSettings.items():
        items.append("%s_%s" % (key, value))
      return ".".join(items)


  def getGeneratedDescriptionFile(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Contents of the sub-experiment description file for
                    this model
    """
    return self.__rawInfo.genDescription


  def getNumRecords(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         The number of records processed by the model.
    """
    return self.__rawInfo.numRecords


  def getParamLabels(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of model parameter labels. For each entry
                    the key is the name of the parameter and the value
                    is the value chosen for it.
    """
    params = self.__unwrapParams()

    # Hypersearch v2 stores the flattened parameter settings in "particleState"
    if "particleState" in params:
      retval = dict()
      # Walk the possibly-nested varStates structure breadth-first; leaf
      # entries (those carrying position/bestPosition/velocity) contribute
      # their "position" value, interior entries become nested dicts.
      queue = [(pair, retval) for pair in
               params["particleState"]["varStates"].iteritems()]
      while len(queue) > 0:
        pair, output = queue.pop()
        k, v = pair
        if ("position" in v and "bestPosition" in v and
            "velocity" in v):
          output[k] = v["position"]
        else:
          if k not in output:
            output[k] = dict()
          queue.extend((pair, output[k]) for pair in v.iteritems())
      return retval
    # NOTE(review): when params has no "particleState" key this falls through
    # and implicitly returns None, which getModelDescription() would crash
    # on -- presumably v2 params always carry particleState; confirm.


  def __unwrapParams(self):
    """Unwraps self.__rawInfo.params into the equivalent python dictionary
    and caches it in self.__cachedParams. Returns the unwrapped params

    Parameters:
    ----------------------------------------------------------------------
    retval:         Model params dictionary corresponding to the json
                    as returned in ClientJobsDAO.modelsInfo()[x].params
    """
    if self.__cachedParams is None:
      self.__cachedParams = json.loads(self.__rawInfo.params)
      assert self.__cachedParams is not None, \
             "%s resulted in None" % self.__rawInfo.params

    return self.__cachedParams


  def getReportMetrics(self):
    """Retrieves a dictionary of metrics designated for report

    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of metrics that were collected for the model
                    or an empty dictionary if there aren't any.
    """
    return self.__unwrapResults().reportMetrics


  def getOptimizationMetrics(self):
    """Retrieves a dictionary of metrics designated for optimization

    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of optimization metrics that were collected
                    for the model or an empty dictionary if there aren't any.
    """
    return self.__unwrapResults().optimizationMetrics


  def getAllMetrics(self):
    """Retrieves a dictionary of metrics that combines all report and
    optimization metrics

    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of optimization metrics that were collected
                    for the model; an empty dictionary if there aren't any.
    """
    # BUGFIX: copy before merging. getReportMetrics() returns the *cached*
    # dict by reference; updating it in place polluted every subsequent
    # getReportMetrics() call with optimization metrics.
    result = dict(self.getReportMetrics())
    result.update(self.getOptimizationMetrics())
    return result


  ModelResults = collections.namedtuple("ModelResultsTuple",
                                        ["reportMetrics",
                                         "optimizationMetrics"])
  """Each element is a dictionary: property name is the metric name and
  property value is the metric value as generated by the model
  """


  def __unwrapResults(self):
    """Unwraps self.__rawInfo.results and caches it in self.__cachedResults;
    Returns the unwrapped params

    Parameters:
    ----------------------------------------------------------------------
    retval:         ModelResults namedtuple instance
    """
    if self.__cachedResults is None:
      if self.__rawInfo.results is not None:
        # results column is a JSON 2-list: [reportMetrics, optimizationMetrics]
        resultList = json.loads(self.__rawInfo.results)
        assert len(resultList) == 2, \
               "Expected 2 elements, but got %s (%s)." % (
                  len(resultList), resultList)
        self.__cachedResults = self.ModelResults(
          reportMetrics=resultList[0],
          optimizationMetrics=resultList[1])
      else:
        self.__cachedResults = self.ModelResults(
          reportMetrics={},
          optimizationMetrics={})

    return self.__cachedResults


  def isWaitingToStart(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         True if the model has not been started yet
    """
    waiting = (self.__rawInfo.status == self.__nupicModelStatus_notStarted)
    return waiting


  def isRunning(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         True if the model is currently being evaluated
    """
    running = (self.__rawInfo.status == self.__nupicModelStatus_running)
    return running


  def isFinished(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         True if the model's processing has completed (either with
                    success or failure).
    """
    finished = (self.__rawInfo.status == self.__nupicModelStatus_completed)
    return finished


  def getCompletionReason(self):
    """Returns _ModelCompletionReason.

    NOTE: it's an error to call this method if isFinished() would return False.

    Parameters:
    ----------------------------------------------------------------------
    retval:         _ModelCompletionReason instance
    """
    assert self.isFinished(), "Too early to tell: %s" % self
    return _ModelCompletionReason(self.__rawInfo.completionReason)


  def getCompletionMsg(self):
    """Returns model completion message.

    NOTE: it's an error to call this method if isFinished() would return False.

    Parameters:
    ----------------------------------------------------------------------
    retval:         completion message
    """
    assert self.isFinished(), "Too early to tell: %s" % self
    return self.__rawInfo.completionMsg


  def getStartTime(self):
    """Returns model evaluation start time.

    NOTE: it's an error to call this method if isWaitingToStart() would
    return True.

    Parameters:
    ----------------------------------------------------------------------
    retval:         model evaluation start time
    """
    assert not self.isWaitingToStart(), "Too early to tell: %s" % self
    return "%s" % self.__rawInfo.startTime


  def getEndTime(self):
    """Returns model evaluation end time.

    NOTE: it's an error to call this method if isFinished() would return False.

    Parameters:
    ----------------------------------------------------------------------
    retval:         model evaluation end time
    """
    assert self.isFinished(), "Too early to tell: %s" % self
    return "%s" % self.__rawInfo.endTime
class _ModelCompletionReason(_JobCompletionReason):
  """ @private
  Completion reason for models. Currently identical in behavior to
  _JobCompletionReason; kept as a distinct type so call sites can
  distinguish model-level from job-level completion reasons.
  """
  pass
| 75,753 | Python | .py | 1,784 | 34.986547 | 159 | 0.60613 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,022 | particle.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/particle.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import logging
import StringIO
import copy
import pprint
import random
from nupic.swarming.hypersearch.permutation_helpers import PermuteChoices
class Particle(object):
  """Construct a particle. Each particle evaluates one or more models
  serially. Each model represents a position that the particle is evaluated
  at.

  Each position is a set of values chosen for each of the permutation variables.
  The particle's best position is the value of the permutation variables when it
  did best on the optimization metric.

  Some permutation variables are treated like traditional particle swarm
  variables - that is they have a position and velocity. Others are simply
  choice variables, for example a list of strings. We follow a different
  methodology for choosing each permutation variable value depending on its
  type.

  A particle belongs to 1 and only 1 swarm. A swarm is a collection of particles
  that all share the same global best position. A swarm is identified by its
  specific combination of fields. If we are evaluating multiple different field
  combinations, then there will be multiple swarms. A Hypersearch Worker (HSW)
  will only instantiate and run one particle at a time. When done running a
  particle, another worker can pick it up, pick a new position, for it and run
  it based on the particle state information which is stored in each model table
  entry.

  Each particle has a generationIdx. It starts out at generation #0. Every time
  a model evaluation completes and the particle is moved to a different position
  (to evaluate a different model), the generation index is incremented.

  Every particle that is created has a unique particleId. The particleId
  is a string formed as '<workerConnectionId>.<particleIdx>', where particleIdx
  starts at 0 for each worker and increments by 1 every time a new particle
  is created by that worker.
  """

  # Worker-local counter used to build unique particle IDs; see the
  # particleId description in the class docstring.
  _nextParticleID = 0
  def __init__(self, hsObj, resultsDB, flattenedPermuteVars,
               swarmId=None, newFarFrom=None, evolveFromState=None,
               newFromClone=None, newParticleId=False):
    """ Create a particle.

    There are 3 fundamentally different methods of instantiating a particle:
    1.) You can instantiate a new one from scratch, at generation index #0. This
          particle gets a new particleId.
            required: swarmId
            optional: newFarFrom
            must be None: evolveFromState, newFromClone

    2.) You can instantiate one from savedState, in which case it's generation
          index is incremented (from the value stored in the saved state) and
          its particleId remains the same.
            required: evolveFromState
            optional:
            must be None: flattenedPermuteVars, swarmId, newFromClone

    3.) You can clone another particle, creating a new particle at the same
          generationIdx but a different particleId. This new particle will end
          up at exactly the same position as the one it was cloned from. If
          you want to move it to the next position, or just jiggle it a bit, call
          newPosition() or agitate() after instantiation.
            required: newFromClone
            optional:
            must be None: flattenedPermuteVars, swarmId, evolveFromState

    Parameters:
    --------------------------------------------------------------------
    hsObj:    The HypersearchV2 instance
    resultsDB:  the ResultsDB instance that holds all the model results
    flattenedPermuteVars:  dict() containing the (key, PermuteVariable) pairs
          of the flattened permutation variables as read from the permutations
          file.
    swarmId:  String that represents the encoder names of the encoders that are
          to be included in this particle's model. Of the form
          'encoder1.encoder2'.
          Required for creation method #1.
    newFarFrom:  If not None, this is a list of other particleState dicts in the
          swarm that we want to be as far away from as possible. Optional
          argument for creation method #1.
    evolveFromState:  If not None, evolve an existing particle. This is a
          dict containing the particle's state. Preserve the particleId, but
          increment the generation index. Required for creation method #2.
    newFromClone:  If not None, clone this other particle's position and generation
          index, with small random perturbations. This is a dict containing the
          particle's state. Required for creation method #3.
    newParticleId:  Only applicable when newFromClone is True. Give the clone
          a new particle ID.
    """
    # Save constructor arguments
    self._hsObj = hsObj
    self.logger = hsObj.logger
    self._resultsDB = resultsDB

    # See the random number generator used for all the variables in this
    # particle. We will seed it differently based on the construction method,
    # below.  (Fixed seed keeps particle behavior reproducible across workers.)
    self._rng = random.Random()
    self._rng.seed(42)

    # Setup our variable set by taking what's in flattenedPermuteVars and
    # stripping out vars that belong to encoders we are not using.
    def _setupVars(flattenedPermuteVars):
      allowedEncoderNames = self.swarmId.split('.')
      self.permuteVars = copy.deepcopy(flattenedPermuteVars)

      # Remove fields we don't want.
      varNames = self.permuteVars.keys()
      for varName in varNames:
        # Remove encoders we're not using.  Encoder variables are named
        # '<encoderName>:<varName>'.
        if ':' in varName:    # if an encoder
          if varName.split(':')[0] not in allowedEncoderNames:
            self.permuteVars.pop(varName)
            continue

        # All PermuteChoice variables need to know all prior results obtained
        # with each choice.
        if isinstance(self.permuteVars[varName], PermuteChoices):
          if self._hsObj._speculativeParticles:
            maxGenIdx = None
          else:
            maxGenIdx = self.genIdx - 1

          resultsPerChoice = self._resultsDB.getResultsPerChoice(
              swarmId=self.swarmId, maxGenIdx=maxGenIdx, varName=varName)
          self.permuteVars[varName].setResultsPerChoice(
              resultsPerChoice.values())

    # Method #1
    # Create from scratch, optionally pushing away from others that already
    # exist.
    if swarmId is not None:
      assert (evolveFromState is None)
      assert (newFromClone is None)

      # Save construction param
      self.swarmId = swarmId

      # Assign a new unique ID to this particle
      self.particleId = "%s.%s" % (str(self._hsObj._workerID),
                                   str(Particle._nextParticleID))
      Particle._nextParticleID += 1

      # Init the generation index
      self.genIdx = 0

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Push away from other particles?
      if newFarFrom is not None:
        for varName in self.permuteVars.iterkeys():
          otherPositions = []
          for particleState in newFarFrom:
            otherPositions.append(
                particleState['varStates'][varName]['position'])
          self.permuteVars[varName].pushAwayFrom(otherPositions, self._rng)

          # Give this particle a unique seed.
          self._rng.seed(str(otherPositions))

    # Method #2
    # Instantiate from saved state, preserving particleId but incrementing
    # generation index.
    elif evolveFromState is not None:
      assert (swarmId is None)
      assert (newFarFrom is None)
      assert (newFromClone is None)

      # Setup other variables from saved state
      self.particleId = evolveFromState['id']
      self.genIdx = evolveFromState['genIdx'] + 1
      self.swarmId = evolveFromState['swarmId']

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Override the position and velocity of each variable from
      # saved state
      self.initStateFrom(self.particleId, evolveFromState, newBest=True)

      # Move it to the next position. We need the swarm best for this.
      self.newPosition()

    # Method #3
    # Clone another particle, producing a new particle at the same genIdx with
    # the same particleID. This is used to re-run an orphaned model.
    elif newFromClone is not None:
      assert (swarmId is None)
      assert (newFarFrom is None)
      assert (evolveFromState is None)

      # Setup other variables from clone particle
      self.particleId = newFromClone['id']
      if newParticleId:
        self.particleId = "%s.%s" % (str(self._hsObj._workerID),
                                     str(Particle._nextParticleID))
        Particle._nextParticleID += 1

      self.genIdx = newFromClone['genIdx']
      self.swarmId = newFromClone['swarmId']

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Override the position and velocity of each variable from
      # the clone
      self.initStateFrom(self.particleId, newFromClone, newBest=False)

    else:
      assert False, "invalid creation parameters"

    # Log it
    self.logger.debug("Created particle: %s" % (str(self)))
def __repr__(self):
return "Particle(swarmId=%s) [particleId=%s, genIdx=%d, " \
"permuteVars=\n%s]" % (self.swarmId, self.particleId,
self.genIdx,
pprint.pformat(self.permuteVars, indent=4))
def getState(self):
"""Get the particle state as a dict. This is enough information to
instantiate this particle on another worker."""
varStates = dict()
for varName, var in self.permuteVars.iteritems():
varStates[varName] = var.getState()
return dict(id=self.particleId,
genIdx=self.genIdx,
swarmId=self.swarmId,
varStates=varStates)
  def initStateFrom(self, particleId, particleState, newBest):
    """Init all of our variable positions, velocities, and optionally the best
    result and best position from the given particle.

    If newBest is true, we get the best result and position for this new
    generation from the resultsDB. This is used when evolving a particle
    because the bestResult and position as stored were the best AT THE TIME
    THAT PARTICLE STARTED TO RUN and do not include the best since that
    particle completed.
    """
    # Get the updated best position and result?
    if newBest:
      (bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
    else:
      bestResult = bestPosition = None

    # Replace with the position and velocity of each variable from
    # saved state
    varStates = particleState['varStates']
    for varName in varStates.keys():
      varState = copy.deepcopy(varStates[varName])
      if newBest:
        varState['bestResult'] = bestResult
      if bestPosition is not None:
        varState['bestPosition'] = bestPosition[varName]
      self.permuteVars[varName].setState(varState)
  def copyEncoderStatesFrom(self, particleState):
    """Copy all encoder variables from particleState into this particle.

    Parameters:
    --------------------------------------------------------------
    particleState:        dict produced by a particle's getState() method
    """
    # Set this to false if you don't want the variable to move anymore
    # after we set the state
    allowedToMove = True

    for varName in particleState['varStates']:
      # Encoder variables are named '<encoderName>:<varName>'.
      if ':' in varName:    # if an encoder

        # If this particle doesn't include this field, don't copy it
        if varName not in self.permuteVars:
          continue

        # Set the best position to the copied position
        state = copy.deepcopy(particleState['varStates'][varName])
        state['_position'] = state['position']
        state['bestPosition'] = state['position']

        if not allowedToMove:
          state['velocity'] = 0

        # Set the state now
        self.permuteVars[varName].setState(state)

        if allowedToMove:
          # Let the particle move in both directions from the best position
          # it found previously and set it's initial velocity to a known
          # fraction of the total distance.
          self.permuteVars[varName].resetVelocity(self._rng)
  def copyVarStatesFrom(self, particleState, varNames):
    """Copy specific variables from particleState into this particle.

    Parameters:
    --------------------------------------------------------------
    particleState:        dict produced by a particle's getState() method
    varNames:             which variables to copy
    """
    # Set this to false if you don't want the variable to move anymore
    # after we set the state
    allowedToMove = True

    for varName in particleState['varStates']:
      if varName in varNames:

        # If this particle doesn't include this field, don't copy it
        if varName not in self.permuteVars:
          continue

        # Set the best position to the copied position
        state = copy.deepcopy(particleState['varStates'][varName])
        state['_position'] = state['position']
        state['bestPosition'] = state['position']

        if not allowedToMove:
          state['velocity'] = 0

        # Set the state now
        self.permuteVars[varName].setState(state)

        if allowedToMove:
          # Let the particle move in both directions from the best position
          # it found previously and set it's initial velocity to a known
          # fraction of the total distance.
          self.permuteVars[varName].resetVelocity(self._rng)
def getPosition(self):
"""Return the position of this particle. This returns a dict() of key
value pairs where each key is the name of the flattened permutation
variable and the value is its chosen value.
Parameters:
--------------------------------------------------------------
retval: dict() of flattened permutation choices
"""
result = dict()
for (varName, value) in self.permuteVars.iteritems():
result[varName] = value.getPosition()
return result
@staticmethod
def getPositionFromState(pState):
"""Return the position of a particle given its state dict.
Parameters:
--------------------------------------------------------------
retval: dict() of particle position, keys are the variable names,
values are their positions
"""
result = dict()
for (varName, value) in pState['varStates'].iteritems():
result[varName] = value['position']
return result
def agitate(self):
"""Agitate this particle so that it is likely to go to a new position.
Every time agitate is called, the particle is jiggled an even greater
amount.
Parameters:
--------------------------------------------------------------
retval: None
"""
for (varName, var) in self.permuteVars.iteritems():
var.agitate()
self.newPosition()
  def newPosition(self, whichVars=None):
    # TODO: incorporate data from choice variables....
    # TODO: make sure we're calling this when appropriate.
    """Choose a new position based on results obtained so far from all other
    particles.

    Parameters:
    --------------------------------------------------------------
    whichVars:       If not None, only move these variables
    retval:          new position
    """
    # Get the global best position for this swarm generation
    globalBestPosition = None
    # If speculative particles are enabled, use the global best considering
    # even particles in the current generation. This gives better results
    # but does not provide repeatable results because it depends on
    # worker timing
    if self._hsObj._speculativeParticles:
      genIdx = self.genIdx
    else:
      genIdx = self.genIdx - 1

    if genIdx >= 0:
      (bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId,
                                                                genIdx)
      if bestModelId is not None:
        (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(
            bestModelId)
        globalBestPosition = Particle.getPositionFromState(particleState)

    # Update each variable; each one moves relative to the swarm's global
    # best (or freely, if no best is known yet).
    for (varName, var) in self.permuteVars.iteritems():
      if whichVars is not None and varName not in whichVars:
        continue
      if globalBestPosition is None:
        var.newPosition(None, self._rng)
      else:
        var.newPosition(globalBestPosition[varName], self._rng)

    # get the new position
    position = self.getPosition()

    # Log the new position (guarded so the pformat work is skipped unless
    # DEBUG logging is actually enabled).
    if self.logger.getEffectiveLevel() <= logging.DEBUG:
      msg = StringIO.StringIO()
      # NOTE: Python 2 print-chevron syntax; this module predates Python 3.
      print >> msg, "New particle position: \n%s" % (pprint.pformat(position,
                                                                    indent=4))
      print >> msg, "Particle variables:"
      for (varName, var) in self.permuteVars.iteritems():
        print >> msg, "  %s: %s" % (varName, str(var))
      self.logger.debug(msg.getvalue())
      msg.close()

    return position
| 18,043 | Python | .py | 380 | 40.107895 | 82 | 0.667539 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,023 | regression.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/regression.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import deque
import math
""" This file contains some utility classes for computing linear and exponential
regressions. It is primarily used by the maturity checking logic in the
ModelRunner to see whether or not the model should be marked as mature. """
class LinearRegression(object):
  """Helper class to compute the slope of a best-fit line given a set of (x,y)
  points.

  Keeps running sums so the slope can be computed incrementally in O(1) per
  point. If windowSize is given, only the most recent windowSize points
  contribute to the fit; older points are retired as new ones arrive.
  """

  def __init__(self, windowSize = None):
    """
    Parameters:
    ----------------------------------------------------------------------
    windowSize:  if not None, only the last windowSize points are used when
                 computing the slope; otherwise all points are used.
    """
    self._sum_xy = 0
    self._sum_x = 0
    self._sum_y = 0
    self._sum_x_sq = 0
    self._n = 0

    # BUG FIX: these attributes were previously assigned only when windowSize
    # was given, so addPoint()/getSlope() raised AttributeError for the
    # unwindowed case. Always define them; None means "no sliding window".
    self._windowSize = windowSize
    if windowSize is not None:
      self._window = deque(maxlen=windowSize)
    else:
      self._window = None

  def addPoint(self, x, y):
    """Add the point (x, y), retiring the oldest point first if the sliding
    window is full."""
    self._sum_x += x
    self._sum_y += y
    self._sum_xy += x*y
    self._sum_x_sq += x*x
    self._n += 1

    if self._window is not None:
      if len(self._window) == self._windowSize:
        self.removePoint(*self._window.popleft())
      self._window.append((x,y))

  def removePoint(self, x, y):
    """Remove a previously-added point (x, y) from the running sums."""
    self._sum_x -= x
    self._sum_y -= y
    self._sum_xy -= x*y
    self._sum_x_sq -= x*x
    self._n -= 1

  def getSlope(self):
    """Return the least-squares slope of the current points, or None if there
    are fewer than 2 points, the window is not yet full, or all x values are
    identical (degenerate fit)."""
    if self._n < 2:
      return None
    if self._window is not None and \
       len(self._window) < self._windowSize:
      return None

    den = self._sum_x_sq - self._sum_x**2 / float(self._n)
    if den == 0:
      return None
    num = self._sum_xy - self._sum_x * self._sum_y / float(self._n)
    return num/den
class ExponentialRegression(object):
  """ Helper class for computing the average percent change for a best-fit
  exponential function given a set of (x,y) points. This class tries to fit
  a function of the form y(x) = a e^(bx) to the data. The function
  getPctChange() returns the value of e^(b)-1, where e^(b) is the ratio
  y(x+1)/y(x) for the best-fit curve. """

  def __init__(self, windowSize=None):
    # The fit is done in log space: ln(y) = ln(a) + b*x, so we can reuse the
    # incremental linear-regression machinery.
    self._linReg = LinearRegression(windowSize)

  def addPoint(self, x, y):
    self._linReg.addPoint(x, math.log(y))

  def removePoint(self, x, y):
    self._linReg.removePoint(x, math.log(y))

  def getPctChange(self):
    slope = self._linReg.getSlope()
    return None if slope is None else math.exp(slope) - 1
class AveragePctChange(object):
  """Computes the average percent change (signed and absolute) between
  successive y values, optionally over a sliding window of the most recent
  windowSize changes."""

  def __init__(self, windowSize=None):
    self._sum_pct_change = 0
    self._sum_pct_change_abs = 0
    self._n = 0
    self._last = None

    # BUG FIX: _window/_windowSize were previously left undefined when
    # windowSize was None, crashing addPoint()/getPctChanges(). Always define
    # them; None means "no sliding window".
    self._windowSize = windowSize
    if windowSize is not None:
      self._window = deque(maxlen=windowSize)
    else:
      self._window = None

  def addPoint(self, x, y):
    """Record the point (x, y); the percent change relative to the previous
    y value is accumulated (treated as 0 when the previous y was 0)."""
    if self._n > 0:
      if self._last != 0:
        pctChange = (y-self._last)/self._last
      else:
        pctChange = 0
      self._sum_pct_change += pctChange
      self._sum_pct_change_abs += abs(pctChange)
      if self._window is not None:
        self._window.append((x, pctChange))

    self._n += 1
    self._last = y

    # Retire the oldest pct change once the window is full.
    if self._window is not None and len(self._window) == self._windowSize:
      self.removePoint(*self._window.popleft())

  def removePoint(self, x, pctChange):
    """Remove a previously recorded pct change from the running sums."""
    self._sum_pct_change -= pctChange
    self._sum_pct_change_abs -= abs(pctChange)
    self._n -= 1

  def getPctChanges(self):
    """Return (average pct change, average absolute pct change), or
    (None, None) if no changes have been recorded yet or the requested
    window is not yet full."""
    if self._n == 0:
      return None, None
    if self._windowSize is not None and self._n < self._windowSize:
      return None, None
    return (self._sum_pct_change/self._n,
            self._sum_pct_change_abs/self._n)
| 4,367 | Python | .py | 114 | 33.210526 | 86 | 0.640692 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,024 | swarm_terminator.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/swarm_terminator.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import logging
import copy
from nupic.swarming.hypersearch.support import Configuration
def _flattenKeys(keys):
return '|'.join(keys)
class SwarmTerminator(object):
  """Class that records the performance of swarms in a sprint and makes
  decisions about which swarms should stop running. This is a useful
  optimization that identifies field combinations that no longer need to be
  run.
  """
  MATURITY_WINDOW = None
  MAX_GENERATIONS = None

  # Per-generation tolerances: a swarm whose score is more than
  # (1 + milestone) times the generation's best score is stopped.
  # NOTE: range() (not the Python-2-only xrange()) is equivalent here in
  # both Python 2 and 3.
  _DEFAULT_MILESTONES = [1.0 / (x + 1) for x in range(12)]

  def __init__(self, milestones=None, logLevel=None):
    # Set class constants.
    self.MATURITY_WINDOW = int(Configuration.get(
                                      "nupic.hypersearch.swarmMaturityWindow"))
    self.MAX_GENERATIONS = int(Configuration.get(
                                      "nupic.hypersearch.swarmMaxGenerations"))
    if self.MAX_GENERATIONS < 0:
      self.MAX_GENERATIONS = None

    # Set up instance variables.
    self._isTerminationEnabled = bool(int(Configuration.get(
                                  'nupic.hypersearch.enableSwarmTermination')))

    # Per-swarm cumulative-best and raw score histories, indexed by generation.
    self.swarmBests = dict()
    self.swarmScores = dict()
    self.terminatedSwarms = set([])

    self._logger = logging.getLogger(".".join(
        ['com.numenta', self.__class__.__module__, self.__class__.__name__]))

    if milestones is not None:
      self.milestones = milestones
    else:
      self.milestones = copy.deepcopy(self._DEFAULT_MILESTONES)

  def recordDataPoint(self, swarmId, generation, errScore):
    """Record the best score for a swarm's generation index (x)
    Returns list of swarmIds to terminate.
    """
    terminatedSwarms = []

    # Append score to existing swarm.
    if swarmId in self.swarmScores:
      entry = self.swarmScores[swarmId]
      assert (len(entry) == generation)
      entry.append(errScore)

      entry = self.swarmBests[swarmId]
      entry.append(min(errScore, entry[-1]))

      assert (len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))
    else:
      # Create list of scores for a new swarm
      assert (generation == 0)
      self.swarmScores[swarmId] = [errScore]
      self.swarmBests[swarmId] = [errScore]

    # If the current swarm hasn't completed at least MIN_GENERATIONS, it should
    # not be candidate for maturation or termination. This prevents the initial
    # allocation of particles in PSO from killing off a field combination too
    # early.
    if generation + 1 < self.MATURITY_WINDOW:
      return terminatedSwarms

    # If the swarm has completed more than MAX_GENERATIONS, it should be marked
    # as mature, regardless of how its value is changing.
    if self.MAX_GENERATIONS is not None and generation > self.MAX_GENERATIONS:
      self._logger.info(
          'Swarm %s has matured (more than %d generations). Stopping' %
          (swarmId, self.MAX_GENERATIONS))
      terminatedSwarms.append(swarmId)

    if self._isTerminationEnabled:
      terminatedSwarms.extend(self._getTerminatedSwarms(generation))

    # Return which swarms to kill when we've reached maturity
    # If there is no change in the swarm's best for some time,
    # Mark it dead
    cumulativeBestScores = self.swarmBests[swarmId]
    if cumulativeBestScores[-1] == cumulativeBestScores[-self.MATURITY_WINDOW]:
      self._logger.info('Swarm %s has matured (no change in %d generations).'
                        'Stopping...'% (swarmId, self.MATURITY_WINDOW))
      terminatedSwarms.append(swarmId)

    self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)
    return terminatedSwarms

  def numDataPoints(self, swarmId):
    """Return how many scores have been recorded for the given swarm."""
    if swarmId in self.swarmScores:
      return len(self.swarmScores[swarmId])
    else:
      return 0

  def _getTerminatedSwarms(self, generation):
    """Return the swarms whose score at this generation is too far above the
    best score for the generation (per the milestone tolerances)."""
    terminatedSwarms = []
    generationScores = dict()
    for swarm, scores in self.swarmScores.items():
      if len(scores) > generation and swarm not in self.terminatedSwarms:
        generationScores[swarm] = scores[generation]

    if len(generationScores) == 0:
      # BUG FIX: this previously returned None here, which crashed the
      # caller's list.extend() in recordDataPoint().
      return terminatedSwarms

    bestScore = min(generationScores.values())
    # BUG FIX: clamp the index so generations beyond the milestone table reuse
    # the last (tightest) tolerance instead of raising IndexError.
    tolerance = self.milestones[min(generation, len(self.milestones) - 1)]

    for swarm, score in generationScores.items():
      if score > (1 + tolerance) * bestScore:
        self._logger.info('Swarm %s is doing poorly at generation %d.\n'
                          'Current Score:%s \n'
                          'Best Score:%s \n'
                          'Tolerance:%s. Stopping...',
                          swarm, generation, score, bestScore, tolerance)
        terminatedSwarms.append(swarm)

    return terminatedSwarms
| 5,565 | Python | .py | 120 | 40.05 | 80 | 0.682355 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,025 | model_terminator.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/model_terminator.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import functools
import logging
from collections import namedtuple
# A periodic-check request: repeating (whether the activity recurs), period
# (iterations between firings), cb (callable to invoke when it fires).
PeriodicActivityRequest = namedtuple("PeriodicActivityRequest",
                                     ("repeating", "period", "cb"))


class ModelTerminator(object):
  """
  Handles the logic for terminating models that are performing poorly
  relative to the best model in the same job.
  """

  # (iteration, tolerance) milestones: at each listed iteration count, a
  # model whose metric exceeds (1 + tolerance) times the job's best metric
  # is canceled.
  _MILESTONES = [(10, 0.5),
                 (15, 0.25),
                 (20, 0.01)]

  def __init__(self, modelID, cjDAO, logLevel = None):
    """
    Parameters:
    -----------------------------------------------------------------------
    modelID:    ID of the model this terminator watches
    cjDAO:      client-jobs database access object
    logLevel:   optional logging level override
    """
    self._modelID = modelID
    self._cjDB = cjDAO
    # BUG FIX: was a duplicated 'self.logger = self.logger = ...' assignment.
    self.logger = logging.getLogger(".".join(['com.numenta',
                       self.__class__.__module__, self.__class__.__name__]))

    self._jobID = self._cjDB.modelsGetFields(modelID, ['jobId'])[0]
    if logLevel is not None:
      self.logger.setLevel(logLevel)

    self.logger.info("Created new ModelTerminator for model %d"%modelID)

  def getTerminationCallbacks(self, terminationFunc):
    """ Returns the periodic checks to see if the model should
    continue running.

    Parameters:
    -----------------------------------------------------------------------
    terminationFunc:  The function that will be called in the model main loop
                      as a wrapper around this function. Must have a parameter
                      called 'index'

    Returns:          A list of PeriodicActivityRequest objects.
    """
    activities = [None] * len(ModelTerminator._MILESTONES)
    for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES):
      cb = functools.partial(terminationFunc, index=index)
      activities[index] = PeriodicActivityRequest(repeating=False,
                                                  period=iteration,
                                                  cb=cb)
    # BUG FIX: the original built this list but never returned it, despite
    # the docstring promising a return value.
    return activities

  def checkIsTerminated(self, metric, milestoneIndex):
    """Check whether the model should be terminated at the given milestone.

    Compares metric (assumed: lower is better) against the job's best metric
    so far; if it is worse by more than the milestone's tolerance, the model
    is marked for cancellation in the jobs database.

    Returns True if the model was canceled, False otherwise.
    """
    bestMetric = self._cjDB.jobGetFields(self._jobID,
                                         ['results'])[0]['bestMetric']
    # BUG FIX: was `_MILESTONES[self._index](1)` -- a nonexistent attribute
    # and a call instead of an index into the (iteration, tolerance) tuple.
    tolerance = ModelTerminator._MILESTONES[milestoneIndex][1]

    # Right now we're assuming that we want to minimize the metric
    if metric >= (1.0 + tolerance) * bestMetric:
      # BUG FIX: the format string had 3 placeholders but only 2 arguments.
      self.logger.info("Model %d underperforming (metric:%f, best:%f). "
                       "Canceling..." % (self._modelID, metric, bestMetric))
      self._cjDB.modelSetFields(self._modelID,
                                {'engCancel':True},
                                ignoreUnchanged = True)
      return True
    return False
| 3,397 | Python | .py | 70 | 40.742857 | 84 | 0.618341 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,026 | error_codes.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/error_codes.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
class ErrorCodes(object):
  """Symbolic error-code constants reported by swarming jobs and workers."""
  streamReading = "E10001"
  tooManyModelErrs = "E10002"
  hypersearchLogicErr = "E10003"
  productionModelErr = "E10004" # General PM error
  modelCommandFormatErr = "E10005" # Invalid model command request object
  tooManyFailedWorkers = "E10006"
  unspecifiedErr = "E10007"
  modelInputLostErr = "E10008" # Input stream was garbage-collected
  requestOutOfRange = "E10009" # If a request range is invalid
  invalidType = "E10010" # Invalid type (comment truncated in original -- confirm intent)
| 1,518 | Python | .py | 31 | 47.193548 | 78 | 0.667116 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,027 | object_json.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/object_json.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""JSON encoding and decoding."""
# Pylint gets confused about return types from deserialization.
# pylint: disable=E1103
import json
import sys
# Python 2 scalar types that pass through JSON encoding unchanged
# (long/unicode are Python-2-only names).
NON_OBJECT_TYPES = (type(None), bool, int, float, long, str, unicode)


class Types(object):
  """Sentinel dict keys used to tag non-JSON-native Python values so they
  can be reconstructed on decode."""
  TUPLE = 'py/tuple'
  SET = 'py/set'
  DATETIME = 'datetime/datetime.datetime'
  REPR = 'py/repr'
  OBJECT = 'py/object'
  KEYS = 'py/dict/keys'
def getImportPath(obj):
  """Return the fully-qualified '<module>.<class>' path of obj's class."""
  klass = obj.__class__
  return '{0}.{1}'.format(klass.__module__, klass.__name__)
def convertDict(obj):
  """Return a JSON-encodable copy of dict obj.

  Non-string keys are serialized with dumps() and the serialized forms are
  recorded under Types.KEYS so decoding can restore the original key objects.
  Values are converted recursively via convertObjects().

  NOTE: relies on Python 2 items() returning a list snapshot while obj is
  mutated inside the loop; under Python 3 this would raise RuntimeError.
  """
  obj = dict(obj)  # shallow copy; keys are rewritten in place below
  for k, v in obj.items():
    del obj[k]
    if not (isinstance(k, str) or isinstance(k, unicode)):
      k = dumps(k)
      # Keep track of which keys need to be decoded when loading.
      if Types.KEYS not in obj:
        obj[Types.KEYS] = []
      obj[Types.KEYS].append(k)
    obj[k] = convertObjects(v)
  return obj
def restoreKeysPostDecoding(obj):
  """Recursively undo the key serialization done by convertDict().

  Any dict carrying a Types.KEYS entry has the listed (serialized) keys
  replaced by their deserialized originals; containers are walked
  recursively.
  """
  if isinstance(obj, dict):
    if Types.KEYS in obj:
      for k in obj[Types.KEYS]:
        v = obj[k]
        del obj[k]
        newKey = loads(k)
        obj[newKey] = v
      del obj[Types.KEYS]
    for k, v in obj.items():
      if isinstance(v, dict):
        obj[k] = restoreKeysPostDecoding(v)
  elif isinstance(obj, list):
    obj = [restoreKeysPostDecoding(item) for item in obj]
  elif isinstance(obj, set):
    obj = set([restoreKeysPostDecoding(item) for item in obj])
  elif isinstance(obj, tuple):
    obj = tuple([restoreKeysPostDecoding(item) for item in obj])
  return obj
def convertObjects(obj):
  """Recursively convert obj into a structure json.dumps can encode.

  Scalars pass through; lists/dicts recurse; tuples and sets are wrapped in
  tagged dicts (Types.TUPLE / Types.SET); anything else is reduced to its
  state dict (via __getstate__, __slots__, or __dict__, in that order of
  preference) tagged with its import path under Types.OBJECT.
  """
  if type(obj) in NON_OBJECT_TYPES:
    return obj
  elif isinstance(obj, list):
    return [convertObjects(item) for item in obj]
  elif isinstance(obj, dict):
    return convertDict(obj)
  elif isinstance(obj, tuple):
    return {Types.TUPLE: [convertObjects(item) for item in obj]}
  elif isinstance(obj, set):
    return {Types.SET: [convertObjects(item) for item in obj]}
  else:
    if hasattr(obj, '__getstate__'):
      state = obj.__getstate__()
    elif hasattr(obj, '__slots__'):
      values = map(lambda x: getattr(obj, x), obj.__slots__)
      state = dict(zip(obj.__slots__, values))
    elif hasattr(obj, '__dict__'):
      state = obj.__dict__
    else:
      # Last resort: objects with no capturable state are stored as repr()
      # text and rebuilt with eval() on decode.
      if not hasattr(obj, '__class__'):
        raise TypeError('Cannot encode object: %s' % repr(obj))
      state = {Types.REPR: repr(obj)}
    state[Types.OBJECT] = getImportPath(obj)
    return convertObjects(state)
def objectDecoderHook(obj):
  """json.loads object_hook: rebuild Python values from tagged dicts.

  Reverses the tagging applied by convertObjects(): restores tuples, sets,
  datetimes, repr-encoded values, and arbitrary object instances.  Dicts
  without a recognized tag are returned (with their keys restored) as-is.

  SECURITY NOTE: the DATETIME and REPR branches eval() strings taken from
  the JSON payload -- never call loads()/load() on untrusted input.
  """
  obj = restoreKeysPostDecoding(obj)
  if isinstance(obj, dict):
    if Types.TUPLE in obj:
      return tuple(obj[Types.TUPLE])
    elif Types.SET in obj:
      return set(obj[Types.SET])
    elif Types.DATETIME in obj:
      return eval(obj[Types.DATETIME])
    elif Types.REPR in obj:
      # Evaluate the repr string with the object's module in scope.
      module, name = obj[Types.OBJECT].rsplit('.', 1)
      return eval(obj[Types.REPR], {module: __import__(module)})
    elif Types.OBJECT in obj:
      module, name = obj[Types.OBJECT].rsplit('.', 1)
      __import__(module)
      cls = getattr(sys.modules[module], name)
      # Create an empty instance without running __init__ -- the state is
      # restored below.  Old-style (py2) classes may not support __new__.
      try:
        if hasattr(cls, '__new__'):
          instance = cls.__new__(cls)
        else:
          instance = object.__new__(cls)
      except TypeError:
        try:
          instance = cls()
        except TypeError:
          raise TypeError('Old style class cannot be instantiated: %s' %
                          obj[Types.OBJECT])
      # The remaining entries of the tagged dict are the attributes.
      attrs = obj
      del attrs[Types.OBJECT]
      if hasattr(instance, '__setstate__'):
        instance.__setstate__(attrs)
      else:
        for k, v in attrs.iteritems():
          setattr(instance, k, v)
      return instance
  return obj
def clean(s):
  """Return ``s`` with trailing whitespace removed from every line."""
  stripped = (line.rstrip() for line in s.split('\n'))
  return '\n'.join(stripped)
def dumps(obj, **kwargs):
  """Serialize ``obj`` (including arbitrary objects) to a JSON string,
  stripping trailing whitespace from each line of the output."""
  encoded = convertObjects(obj)
  return clean(json.dumps(encoded, **kwargs))
def dump(obj, f, **kwargs):
  """Serialize ``obj`` with dumps() and write the result to file-like ``f``."""
  text = dumps(obj, **kwargs)
  f.write(text)
def loads(s, **kwargs):
  """Deserialize a JSON string produced by dumps(), rebuilding tagged
  objects and restoring any encoded dict keys."""
  decoded = json.loads(s, object_hook=objectDecoderHook, **kwargs)
  return restoreKeysPostDecoding(decoded)
def load(f, **kwargs):
  """Deserialize JSON read from file-like ``f``; see loads()."""
  decoded = json.load(f, object_hook=objectDecoderHook, **kwargs)
  return restoreKeysPostDecoding(decoded)
| 5,091 | Python | .py | 142 | 31.007042 | 72 | 0.646963 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,028 | permutation_helpers.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/permutation_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This class provides utility classes and functions for use inside permutations
scripts.
"""
import random
import numpy
from nupic.swarming.hypersearch.support import Configuration
class PermuteVariable(object):
  """Abstract base for all PermuteXXX variables usable from a permutation
  script.

  Subclasses model a single particle in a particle-swarm search and must
  implement the state, position, and movement primitives below.
  """

  def __init__(self):
    pass

  def getState(self):
    """Return a dict describing this particle's current state, suitable for
    shipping to another worker through a model record entry."""
    raise NotImplementedError

  def setState(self, state):
    """Restore this particle from a dict previously produced by getState();
    the counterpart of getState()."""
    raise NotImplementedError

  def getPosition(self):
    """Return the particle's current position (snapped to the nearest valid
    value for discrete variables).

    Parameters:
    --------------------------------------------------------------
    retval:   current position
    """
    raise NotImplementedError

  def agitate(self):
    """Kick the particle away from its current position by multiplying up
    its velocity.  Each call increases the velocity further, so repeated
    calls will eventually jiggle the variable to a new position."""
    raise NotImplementedError

  def newPosition(self, globalBestPosition, rng):
    """Advance to a new position, pulled toward ``globalBestPosition`` (the
    colony's best so far) with ``rng`` (a random.Random instance) supplying
    the stochastic terms.

    Parameters:
    --------------------------------------------------------------
    globalBestPosition:   global best position for this colony
    rng:                  instance of random.Random() used for generating
                          random numbers
    retval:               new position
    """
    raise NotImplementedError

  def pushAwayFrom(self, otherVars, rng):
    """Relocate this particle as far as possible from the positions of the
    PermuteVariable instances in ``otherVars``, using ``rng`` for random
    choices.

    Parameters:
    --------------------------------------------------------------
    otherVars:   list of other PermuteVariables to push away from
    rng:         instance of random.Random() used for generating
                 random numbers
    """
    raise NotImplementedError

  def resetVelocity(self, rng):
    """Reset the velocity to a fixed fraction of the variable's total range.
    Typically called when seeding a new swarm from a previous swarm's best
    position, so the particle restarts with a known exploration speed.

    Parameters:
    --------------------------------------------------------------
    rng:         instance of random.Random() used for generating
                 random numbers
    """
    raise NotImplementedError
class PermuteFloat(PermuteVariable):
  """Define a permutation variable which can take on floating point values."""

  def __init__(self, min, max, stepSize=None, inertia=None, cogRate=None,
               socRate=None):
    """Construct a variable that permutes over floating point values using
    the Particle Swarm Optimization (PSO) algorithm. See descriptions of
    PSO (i.e. http://en.wikipedia.org/wiki/Particle_swarm_optimization)
    for references to the inertia, cogRate, and socRate parameters.

    Parameters:
    -----------------------------------------------------------------------
    min:          min allowed value of position
    max:          max allowed value of position
    stepSize:     if not None, the position must be at min + N * stepSize,
                    where N is an integer
    inertia:      The inertia for the particle; defaults to the
                    nupic.hypersearch.inertia configuration value.
    cogRate:      This parameter controls how much the particle is affected
                    by its distance from its local best position; defaults
                    to the nupic.hypersearch.cogRate configuration value.
    socRate:      This parameter controls how much the particle is affected
                    by its distance from the global best position; defaults
                    to the nupic.hypersearch.socRate configuration value.
    """
    super(PermuteFloat, self).__init__()
    self.min = min
    self.max = max
    self.stepSize = stepSize

    # The particle's initial position (mid-range) and velocity (1/5 of the
    # total range).
    self._position = (self.max + self.min) / 2.0
    self._velocity = (self.max - self.min) / 5.0

    # The inertia, cognitive, and social components of the particle.
    self._inertia = (float(Configuration.get("nupic.hypersearch.inertia"))
                     if inertia is None else inertia)
    self._cogRate = (float(Configuration.get("nupic.hypersearch.cogRate"))
                     if cogRate is None else cogRate)
    self._socRate = (float(Configuration.get("nupic.hypersearch.socRate"))
                     if socRate is None else socRate)

    # The particle's local best position and the best global position.
    self._bestPosition = self.getPosition()
    self._bestResult = None

  def __repr__(self):
    """See comments in base class."""
    return ("PermuteFloat(min=%f, max=%f, stepSize=%s) [position=%f(%f), "
            "velocity=%f, _bestPosition=%s, _bestResult=%s]" % (
            self.min, self.max, self.stepSize, self.getPosition(),
            self._position, self._velocity, self._bestPosition,
            self._bestResult))

  def getState(self):
    """See comments in base class."""
    # '_position' is the raw continuous position; 'position' is snapped to
    # the step grid (see getPosition()).
    return dict(_position = self._position,
                position = self.getPosition(),
                velocity = self._velocity,
                bestPosition = self._bestPosition,
                bestResult = self._bestResult)

  def setState(self, state):
    """See comments in base class."""
    self._position = state['_position']
    self._velocity = state['velocity']
    self._bestPosition = state['bestPosition']
    self._bestResult = state['bestResult']

  def getPosition(self):
    """See comments in base class."""
    if self.stepSize is None:
      return self._position

    # Find nearest step and clip the snapped position back into [min, max].
    numSteps = (self._position - self.min) / self.stepSize
    numSteps = int(round(numSteps))
    position = self.min + (numSteps * self.stepSize)
    position = max(self.min, position)
    position = min(self.max, position)
    return position

  def agitate(self):
    """See comments in base class."""
    # Increase velocity enough that it will be higher the next time
    # newPosition() is called. We know that newPosition multiplies by inertia,
    # so take that into account.
    self._velocity *= 1.5 / self._inertia

    # Clip velocity to half the total range.
    maxV = (self.max - self.min)/2
    if self._velocity > maxV:
      self._velocity = maxV
    elif self._velocity < -maxV:
      self._velocity = -maxV

    # if we at the max or min, reverse direction
    if self._position == self.max and self._velocity > 0:
      self._velocity *= -1
    if self._position == self.min and self._velocity < 0:
      self._velocity *= -1

  def newPosition(self, globalBestPosition, rng):
    """See comments in base class."""
    # First, update the velocity. The new velocity is given as:
    # v = (inertia * v)  + (cogRate * r1 * (localBest-pos))
    #                    + (socRate * r2 * (globalBest-pos))
    #
    # where r1 and r2 are random numbers between 0 and 1.0
    lb=float(Configuration.get("nupic.hypersearch.randomLowerBound"))
    ub=float(Configuration.get("nupic.hypersearch.randomUpperBound"))

    self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) *
                      self._cogRate * (self._bestPosition - self.getPosition()))
    if globalBestPosition is not None:
      self._velocity += rng.uniform(lb, ub) * self._socRate * (
          globalBestPosition - self.getPosition())

    # update position based on velocity
    self._position += self._velocity

    # Clip it
    self._position = max(self.min, self._position)
    self._position = min(self.max, self._position)

    # Return it
    return self.getPosition()

  def pushAwayFrom(self, otherPositions, rng):
    """See comments in base class."""
    # If min and max are the same, nothing to do
    if self.max == self.min:
      return

    # How many potential other positions to evaluate?
    numPositions = len(otherPositions) * 4
    if numPositions == 0:
      return

    # Assign a weight to each potential position based on how close it is
    # to other particles.
    stepSize = float(self.max-self.min) / numPositions
    positions = numpy.arange(self.min, self.max + stepSize, stepSize)

    # Get rid of duplicates.
    numPositions = len(positions)
    weights = numpy.zeros(numPositions)

    # Assign a weight to each potential position, based on a gaussian falloff
    # from each existing variable. The weight of a variable to each potential
    # position is given as:
    #    e ^ -(dist^2/stepSize^2)
    maxDistanceSq = -1 * (stepSize ** 2)
    for pos in otherPositions:
      distances = pos - positions
      varWeights = numpy.exp(numpy.power(distances, 2) / maxDistanceSq)
      weights += varWeights

    # Put this particle at the position with smallest weight.
    positionIdx = weights.argmin()
    self._position = positions[positionIdx]

    # Set its best position to this.
    self._bestPosition = self.getPosition()

    # Give it a random direction.
    self._velocity *= rng.choice([1, -1])

  def resetVelocity(self, rng):
    """See comments in base class."""
    # Restart exploration at 1/5 of the range, in a random direction.
    maxVelocity = (self.max - self.min) / 5.0
    self._velocity = maxVelocity #min(abs(self._velocity), maxVelocity)
    self._velocity *= rng.choice([1, -1])
class PermuteInt(PermuteFloat):
  """A permutation variable constrained to integer values.

  Reuses the continuous PSO machinery of PermuteFloat and simply snaps the
  reported position to the nearest integer.
  """

  def __init__(self, min, max, stepSize=1, inertia=None, cogRate=None,
               socRate=None):
    super(PermuteInt, self).__init__(min, max, stepSize, inertia=inertia,
                                     cogRate=cogRate, socRate=socRate)

  def __repr__(self):
    """Debug representation showing both the snapped and raw positions."""
    return ("PermuteInt(min=%d, max=%d, stepSize=%d) [position=%d(%f), "
            "velocity=%f, _bestPosition=%s, _bestResult=%s]" % (
            self.min, self.max, self.stepSize, self.getPosition(),
            self._position, self._velocity, self._bestPosition,
            self._bestResult))

  def getPosition(self):
    """Round the continuous position from the float base class to the
    nearest integer."""
    return int(round(super(PermuteInt, self).getPosition()))
class PermuteChoices(PermuteVariable):
  """Define a permutation variable which can take on discrete choices."""

  def __init__(self, choices, fixEarly=False):
    """
    Parameters:
    -----------------------------------------------------------------------
    choices:   list of the discrete values this variable may take on
    fixEarly:  if True, progressively bias selection toward the choice with
               the best (lowest) error as more results accumulate
    """
    super(PermuteChoices, self).__init__()
    self.choices = choices
    self._positionIdx = 0

    # Keep track of the results obtained for each choice.
    # BUG FIX: was ``[[]] * len(self.choices)``, which aliases a SINGLE
    # list N times; use independent lists per choice.
    self._resultsPerChoice = [[] for _ in self.choices]

    # The particle's local best position and the best global position
    self._bestPositionIdx = self._positionIdx
    self._bestResult = None

    # If this is true then we only return the best position for this encoder
    # after all choices have been seen.
    self._fixEarly = fixEarly

    # Factor that affects how quickly we assymptote to simply choosing the
    # choice with the best error value
    self._fixEarlyFactor = .7

  def __repr__(self):
    """See comments in base class."""
    return "PermuteChoices(choices=%s) [position=%s]" % (self.choices,
        self.choices[self._positionIdx])

  def getState(self):
    """See comments in base class."""
    return dict(_position = self.getPosition(),
                position = self.getPosition(),
                velocity = None,
                bestPosition = self.choices[self._bestPositionIdx],
                bestResult = self._bestResult)

  def setState(self, state):
    """See comments in base class."""
    self._positionIdx = self.choices.index(state['_position'])
    self._bestPositionIdx = self.choices.index(state['bestPosition'])
    self._bestResult = state['bestResult']

  def setResultsPerChoice(self, resultsPerChoice):
    """Setup our resultsPerChoice history based on the passed in
    resultsPerChoice.

    For example, if this variable has the following choices:
      ['a', 'b', 'c']

    resultsPerChoice will have up to 3 elements, each element is a tuple
    containing (choiceValue, errors) where errors is the list of errors
    received from models that used the specific choice:
    retval:
      [('a', [0.1, 0.2, 0.3]), ('b', [0.5, 0.1, 0.6]), ('c', [0.2])]
    """
    # Keep track of the results obtained for each choice.
    # BUG FIX: independent lists, not N aliases of one list.
    self._resultsPerChoice = [[] for _ in self.choices]
    for (choiceValue, values) in resultsPerChoice:
      choiceIndex = self.choices.index(choiceValue)
      self._resultsPerChoice[choiceIndex] = list(values)

  def getPosition(self):
    """See comments in base class."""
    return self.choices[self._positionIdx]

  def agitate(self):
    """See comments in base class."""
    # Not sure what to do for choice variables....
    # TODO: figure this out
    pass

  def newPosition(self, globalBestPosition, rng):
    """See comments in base class."""
    # Compute the mean score per choice.
    numChoices = len(self.choices)
    meanScorePerChoice = []
    overallSum = 0
    numResults = 0

    for i in range(numChoices):
      if len(self._resultsPerChoice[i]) > 0:
        data = numpy.array(self._resultsPerChoice[i])
        meanScorePerChoice.append(data.mean())
        overallSum += data.sum()
        numResults += data.size
      else:
        meanScorePerChoice.append(None)

    if numResults == 0:
      overallSum = 1.0
      numResults = 1

    # For any choices we don't have a result for yet, set to the overall mean.
    for i in range(numChoices):
      if meanScorePerChoice[i] is None:
        meanScorePerChoice[i] = overallSum / numResults

    # Now, pick a new choice based on the above probabilities. Note that the
    # best result is the lowest result. We want to make it more likely to
    # pick the choice that produced the lowest results. So, we need to invert
    # the scores (someLargeNumber - score).
    meanScorePerChoice = numpy.array(meanScorePerChoice)

    # Invert meaning.
    meanScorePerChoice = (1.1 * meanScorePerChoice.max()) - meanScorePerChoice

    # If you want the scores to quickly converge to the best choice, raise the
    # results to a power. This will cause lower scores to become lower
    # probability as you see more results, until it eventually should
    # assymptote to only choosing the best choice.
    if self._fixEarly:
      meanScorePerChoice **= (numResults * self._fixEarlyFactor / numChoices)

    # Normalize.
    total = meanScorePerChoice.sum()
    if total == 0:
      total = 1.0
    meanScorePerChoice /= total

    # Get distribution and choose one based on those probabilities.
    distribution = meanScorePerChoice.cumsum()
    r = rng.random() * distribution[-1]
    choiceIdx = numpy.where(r <= distribution)[0][0]

    self._positionIdx = choiceIdx
    return self.getPosition()

  def pushAwayFrom(self, otherPositions, rng):
    """See comments in base class."""
    # Get the count of how many in each position
    positions = [self.choices.index(x) for x in otherPositions]

    positionCounts = [0] * len(self.choices)
    for pos in positions:
      positionCounts[pos] += 1

    # Move to the least-crowded choice and anchor the local best there.
    self._positionIdx = numpy.array(positionCounts).argmin()
    self._bestPositionIdx = self._positionIdx

  def resetVelocity(self, rng):
    """See comments in base class."""
    pass
class PermuteEncoder(PermuteVariable):
  """ A permutation variable that defines a field encoder. This serves as
  a container for the encoder constructor arguments.
  """

  def __init__(self, fieldName, encoderClass, name=None, **kwArgs):
    """
    Parameters:
    -----------------------------------------------------------------------
    fieldName:     name of the field this encoder reads
    encoderClass:  encoder class name; may be dotted (e.g.
                   'DateEncoder.timeOfDay') -- see getDict()
    name:          encoder name; defaults to the field name
    kwArgs:        encoder constructor args (w, n, minval, maxval, ...);
                   values may themselves be PermuteVariable instances
    """
    super(PermuteEncoder, self).__init__()
    self.fieldName = fieldName
    if name is None:
      name = fieldName
    self.name = name
    self.encoderClass = encoderClass

    # Possible values in kwArgs include: w, n, minval, maxval, etc.
    self.kwArgs = dict(kwArgs)

  def __repr__(self):
    """See comments in base class."""
    suffix = ""
    for key, value in self.kwArgs.items():
      suffix += "%s=%s, " % (key, value)

    return "PermuteEncoder(fieldName=%s, encoderClass=%s, name=%s, %s)" % (
        (self.fieldName, self.encoderClass, self.name, suffix))

  def getDict(self, encoderName, flattenedChosenValues):
    """ Return a dict that can be used to construct this encoder. This dict
    can be passed directly to the addMultipleEncoders() method of the
    multi encoder.

    Parameters:
    ----------------------------------------------------------------------
    encoderName:            name of the encoder
    flattenedChosenValues:  dict of the flattened permutation variables. Any
                              variables within this dict whose key starts
                              with encoderName will be substituted for
                              encoder constructor args which are being
                              permuted over.
    """
    encoder = dict(fieldname=self.fieldName,
                   name=self.name)

    # Get the position of each encoder argument
    # (py2: iteritems; flattened keys are '<encoderName>:<argName>').
    for encoderArg, value in self.kwArgs.iteritems():
      # If a permuted variable, get its chosen value.
      if isinstance(value, PermuteVariable):
        value = flattenedChosenValues["%s:%s" % (encoderName, encoderArg)]

      encoder[encoderArg] = value

    # Special treatment for DateEncoder timeOfDay and dayOfWeek stuff. In the
    #  permutations file, the class can be one of:
    #    DateEncoder.timeOfDay
    #    DateEncoder.dayOfWeek
    #    DateEncoder.season
    # If one of these, we need to intelligently set the constructor args.
    # NOTE(review): this branch assumes 'w' and 'radius' are present in
    # kwArgs for dotted encoder classes -- confirm permutation scripts
    # always supply them.
    if '.' in self.encoderClass:
      (encoder['type'], argName) = self.encoderClass.split('.')
      argValue = (encoder['w'], encoder['radius'])
      encoder[argName] = argValue
      encoder.pop('w')
      encoder.pop('radius')
    else:
      encoder['type'] = self.encoderClass

    return encoder
class Tests(object):
  """Ad-hoc unit tests for the PermuteVariable subclasses (run via
  ``python permutation_helpers.py``); not part of the public API."""

  def _testValidPositions(self, varClass, minValue, maxValue, stepSize,
                          iterations=100):
    """Run a bunch of iterations on a PermuteVar and collect which positions
    were visited. Verify that they were all valid.
    """
    positions = set()
    cogRate = 2.0
    socRate = 2.0
    inertia = None
    gBestPosition = maxValue
    lBestPosition = minValue
    foundBestPosition = None
    foundBestResult = None

    rng = random.Random()
    rng.seed(42)

    var = varClass(min=minValue, max=maxValue, stepSize=stepSize,
                   inertia=inertia, cogRate=cogRate, socRate=socRate)
    for _ in xrange(iterations):
      pos = var.getPosition()
      if self.verbosity >= 1:
        print "pos: %f" % (pos),
      if self.verbosity >= 2:
        print var
      positions.add(pos)

      # Set the result so that the local best is at lBestPosition.
      result = 1.0 - abs(pos - lBestPosition)
      if foundBestResult is None or result > foundBestResult:
        foundBestResult = result
        foundBestPosition = pos
        state = var.getState()
        state['bestPosition'] = foundBestPosition
        state['bestResult'] = foundBestResult
        var.setState(state)

      var.newPosition(gBestPosition, rng)

    positions = sorted(positions)
    print "Positions visited (%d):" % (len(positions)), positions

    # Validate positions.
    assert (max(positions) <= maxValue)
    # NOTE(review): this checks min(positions) <= minValue, i.e. that the
    # lower bound was actually reached; for pure validity one would expect
    # ``>=``. Confirm which was intended.
    assert (min(positions) <= minValue)
    assert (len(positions)) <= int(round((maxValue - minValue)/stepSize)) + 1

  def _testConvergence(self, varClass, minValue, maxValue, targetValue,
                       iterations=100):
    """Test that we can converge on the right answer."""
    gBestPosition = targetValue
    lBestPosition = targetValue
    foundBestPosition = None
    foundBestResult = None
    rng = random.Random()
    rng.seed(42)

    var = varClass(min=minValue, max=maxValue)
    for _ in xrange(iterations):
      pos = var.getPosition()
      if self.verbosity >= 1:
        print "pos: %f" % (pos),
      if self.verbosity >= 2:
        print var

      # Set the result so that the local best is at lBestPosition.
      result = 1.0 - abs(pos - lBestPosition)
      if foundBestResult is None or result > foundBestResult:
        foundBestResult = result
        foundBestPosition = pos
        state = var.getState()
        state['bestPosition'] = foundBestPosition
        state['bestResult'] = foundBestResult
        var.setState(state)

      var.newPosition(gBestPosition, rng)

    # Test that we reached the target.
    print "Target: %f, Converged on: %f" % (targetValue, pos)
    assert abs(pos-targetValue) < 0.001

  def _testChoices(self):
    """Exercise PermuteChoices: uniform selection with no results, biased
    selection with results, and fixEarly convergence behavior."""
    pc = PermuteChoices(['0', '1', '2', '3'])
    counts = [0] * 4
    rng = random.Random()
    rng.seed(42)

    # Check the without results the choices are chosen uniformly.
    for _ in range(1000):
      pos = int(pc.newPosition(None, rng))
      counts[pos] += 1
    for count in counts:
      assert count < 270 and count > 230
    print "No results permuteChoice test passed"

    # Check that with some results the choices are chosen with the lower
    # errors being chosen more often.
    choices = ['1', '11', '21', '31']
    pc = PermuteChoices(choices)

    resultsPerChoice = []
    counts = dict()
    for choice in choices:
      resultsPerChoice.append((choice, [float(choice)]))
      counts[choice] = 0

    pc.setResultsPerChoice(resultsPerChoice)
    rng = random.Random()
    rng.seed(42)

    # Check the without results the choices are chosen uniformly.
    for _ in range(1000):
      choice = pc.newPosition(None, rng)
      counts[choice] += 1

    # Make sure that as the error goes up, the number of times the choice is
    # seen goes down.
    prevCount = 1001
    for choice in choices:
      assert prevCount > counts[choice]
      prevCount = counts[choice]
    print "Results permuteChoice test passed"

    # Check that with fixEarly as you see more data points you begin heavily
    # biasing the probabilities to the one with the lowest error.
    choices = ['1', '11', '21', '31']
    pc = PermuteChoices(choices, fixEarly=True)

    resultsPerChoiceDict = dict()
    counts = dict()
    for choice in choices:
      resultsPerChoiceDict[choice] = (choice, [])
      counts[choice] = 0

    # The count of the highest probability entry, this should go up as more
    # results are seen.
    prevLowestErrorCount = 0
    for _ in range(10):
      for choice in choices:
        resultsPerChoiceDict[choice][1].append(float(choice))
        counts[choice] = 0

      pc.setResultsPerChoice(resultsPerChoiceDict.values())
      rng = random.Random()
      rng.seed(42)

      # Check the without results the choices are chosen uniformly.
      for _ in range(1000):
        choice = pc.newPosition(None, rng)
        counts[choice] += 1

      # Make sure that as the error goes up, the number of times the choice is
      # seen goes down.
      assert prevLowestErrorCount < counts['1']
      prevLowestErrorCount = counts['1']

    print "Fix early permuteChoice test passed"

  def run(self):
    """Run unit tests on this module."""
    # Set the verbosity level.
    self.verbosity = 0

    # ------------------------------------------------------------------------
    # Test that step size is handled correctly for floats
    self._testValidPositions(varClass=PermuteFloat, minValue=2.1,
                             maxValue=5.1, stepSize=0.5)

    # ------------------------------------------------------------------------
    # Test that step size is handled correctly for ints
    self._testValidPositions(varClass=PermuteInt, minValue=2,
                             maxValue=11, stepSize=3)

    # ------------------------------------------------------------------------
    # Test that step size is handled correctly for ints
    self._testValidPositions(varClass=PermuteInt, minValue=2,
                             maxValue=11, stepSize=1)

    # ------------------------------------------------------------------------
    # Test that we can converge on a target value
    # Using Float
    self._testConvergence(varClass=PermuteFloat, minValue=2.1,
                          maxValue=5.1, targetValue=5.0)

    self._testConvergence(varClass=PermuteFloat, minValue=2.1,
                          maxValue=5.1, targetValue=2.2)

    self._testConvergence(varClass=PermuteFloat, minValue=2.1,
                          maxValue=5.1, targetValue=3.5)

    # Using int
    self._testConvergence(varClass=PermuteInt, minValue=1,
                          maxValue=20, targetValue=19)

    self._testConvergence(varClass=PermuteInt, minValue=1,
                          maxValue=20, targetValue=1)

    #test permute choices
    self._testChoices()
if __name__ == '__main__':
  # Run all tests (instantiates the ad-hoc Tests harness defined above).
  tests = Tests()
  tests.run()
| 26,051 | Python | .py | 590 | 37.455932 | 80 | 0.644493 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,029 | extended_logger.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/extended_logger.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import logging
import copy
class ExtendedLogger(logging.Logger):
  """ Extends the log message by appending custom parameters
  (a process-wide prefix set via setLogPrefix) to every message logged
  through this logger class.
  """

  # Process-wide prefix prepended to every message by getExtendedMsg().
  __logPrefix = ''

  def __init__(self, level):
    # NOTE(review): when installed via logging.setLoggerClass(), Logger
    # subclasses are instantiated with the logger *name* as the first
    # positional argument -- so this parameter, despite its name, is
    # forwarded as Logger.__init__'s 'name' argument. Confirm intent.
    self._baseLogger = logging.Logger
    self._baseLogger.__init__(self, level)

  @staticmethod
  def setLogPrefix(logPrefix):
    # Deep-copied so later mutation of the caller's object has no effect.
    ExtendedLogger.__logPrefix = copy.deepcopy(logPrefix)

  def getExtendedMsg(self, msg):
    # Prepend the shared prefix to the raw message.
    extendedMsg = '%s' % (ExtendedLogger.__logPrefix) + msg
    return extendedMsg

  def debug(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'DEBUG'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
    """
    self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs)

  def info(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'INFO'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
    """
    self._baseLogger.info(self, self.getExtendedMsg(msg), *args, **kwargs)

  def warning(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'WARNING'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
    """
    self._baseLogger.warning(self, self.getExtendedMsg(msg), *args, **kwargs)

  # Backwards-compatible alias (mirrors logging.Logger.warn).
  warn = warning

  def error(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.error("Houston, we have a %s", "major problem", exc_info=1)
    """
    self._baseLogger.error(self, self.getExtendedMsg(msg), *args, **kwargs)

  def critical(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'CRITICAL'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
    """
    self._baseLogger.critical(self, self.getExtendedMsg(msg), *args, **kwargs)

  # Alias matching logging.Logger.fatal.
  fatal = critical

  def log(self, level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
    """
    self._baseLogger.log(self, level, self.getExtendedMsg(msg), *args,
                         **kwargs)
def test():
  """Placeholder self-test hook; this module has no standalone tests."""
  pass


if __name__ == "__main__":
  test()
26,030 | __init__.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 981 | Python | .py | 20 | 48.05 | 72 | 0.665973 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,031 | hs_state.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/hs_state.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import json
import itertools
import pprint
from operator import itemgetter
import numpy
class HsState(object):
"""This class encapsulates the Hypersearch state which we share with all
other workers. This state gets serialized into a JSON dict and written to
the engWorkerState field of the job record.
Whenever a worker changes this state, it does an atomic setFieldIfEqual to
insure it has the latest state as updated by any other worker as a base.
Here is an example snapshot of this state information:
swarms = {'a': {'status': 'completed', # 'active','completing','completed',
# or 'killed'
'bestModelId': <modelID>, # Only set for 'completed' swarms
'bestErrScore': <errScore>, # Only set for 'completed' swarms
'sprintIdx': 0,
},
'a.b': {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 1,
}
}
sprints = [{'status': 'completed', # 'active','completing','completed'
'bestModelId': <modelID>, # Only set for 'completed' sprints
'bestErrScore': <errScore>, # Only set for 'completed' sprints
},
{'status': 'completing',
'bestModelId': <None>,
'bestErrScore': <None>
}
{'status': 'active',
'bestModelId': None
'bestErrScore': None
}
]
"""
  def __init__(self, hsObj):
    """ Create our state object.

    Parameters:
    ---------------------------------------------------------------------
    hsObj:   Reference to the HypersearchV2 instance; the DAO, logger, and
             jobID are all reached through it.
    """
    # Save constructor parameters
    self._hsObj = hsObj

    # Convenient access to the logger
    self.logger = self._hsObj.logger

    # This contains our current state, and local working changes
    self._state = None

    # This contains the state we last read from the database
    self._priorStateJSON = None

    # Set when we make a change to our state locally
    self._dirty = False

    # Read in the initial state
    self.readStateFromDB()
def isDirty(self):
"""Return true if our local copy of the state has changed since the
last time we read from the DB.
"""
return self._dirty
def isSearchOver(self):
"""Return true if the search should be considered over."""
return self._state['searchOver']
  def readStateFromDB(self):
    """Set our state to that obtained from the engWorkerState field of the
    job record.

    If no state has been written yet (engWorkerState is None), this builds
    the initial state -- creating the sprint-0 swarms appropriate for the
    search type -- and writes it with an atomic set-if-equal so that only
    one worker's initial state wins; we then re-read whatever actually
    landed in the DB (ours or another worker's).

    Parameters:
    ---------------------------------------------------------------------
    stateJSON:    JSON encoded state from job record
    """
    # jobGetFields returns one value per requested field; we asked for one.
    self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
                                                    ['engWorkerState'])[0]

    # Init if no prior state yet
    if self._priorStateJSON is None:
      swarms = dict()

      # Fast Swarm, first and only sprint has one swarm for each field
      # in fixedFields
      if self._hsObj._fixedFields is not None:
        # NOTE(review): leftover debug print (Python 2 statement syntax);
        # confirm whether any log scraping depends on it before removing.
        print self._hsObj._fixedFields
        encoderSet = []
        for field in self._hsObj._fixedFields:
          # The classifier-only pseudo-field never gets its own swarm
          if field =='_classifierInput':
            continue
          encoderName = self.getEncoderKeyFromName(field)
          assert encoderName in self._hsObj._encoderNames, "The field '%s' " \
              " specified in the fixedFields list is not present in this " \
              " model." % (field)
          encoderSet.append(encoderName)
        # Sort so the swarm id is canonical regardless of field order
        encoderSet.sort()
        swarms['.'.join(encoderSet)] = {
                                'status': 'active',
                                'bestModelId': None,
                                'bestErrScore': None,
                                'sprintIdx': 0,
                                }

      # Temporal prediction search, first sprint has N swarms of 1 field each,
      # the predicted field may or may not be that one field.
      elif self._hsObj._searchType == HsSearchType.temporal:
        for encoderName in self._hsObj._encoderNames:
          swarms[encoderName] = {
                                'status': 'active',
                                'bestModelId': None,
                                'bestErrScore': None,
                                'sprintIdx': 0,
                                }

      # Classification prediction search, first sprint has N swarms of 1 field
      # each where this field can NOT be the predicted field.
      elif self._hsObj._searchType == HsSearchType.classification:
        for encoderName in self._hsObj._encoderNames:
          if encoderName == self._hsObj._predictedFieldEncoder:
            continue
          swarms[encoderName] = {
                                'status': 'active',
                                'bestModelId': None,
                                'bestErrScore': None,
                                'sprintIdx': 0,
                                }

      # Legacy temporal. This is either a model that uses reconstruction or
      # an older multi-step model that doesn't have a separate
      # 'classifierOnly' encoder for the predicted field. Here, the predicted
      # field must ALWAYS be present and the first sprint tries the predicted
      # field only
      elif self._hsObj._searchType == HsSearchType.legacyTemporal:
        swarms[self._hsObj._predictedFieldEncoder] = {
                                'status': 'active',
                                'bestModelId': None,
                                'bestErrScore': None,
                                'sprintIdx': 0,
                                }

      else:
        raise RuntimeError("Unsupported search type: %s" % \
                            (self._hsObj._searchType))

      # Initialize the state.
      self._state = dict(
        # The last time the state was updated by a worker.
        lastUpdateTime = time.time(),

        # Set from within setSwarmState() if we detect that the sprint we just
        # completed did worse than a prior sprint. This stores the index of
        # the last good sprint.
        lastGoodSprint = None,

        # Set from within setSwarmState() if lastGoodSprint is True and all
        # sprints have completed.
        searchOver = False,

        # This is a summary of the active swarms - this information can also
        # be obtained from the swarms entry that follows, but is summarized here
        # for easier reference when viewing the state as presented by
        # log messages and prints of the hsState data structure (by
        # permutations_runner).
        activeSwarms = swarms.keys(),

        # All the swarms that have been created so far.
        swarms = swarms,

        # All the sprints that have completed or are in progress.
        sprints = [{'status': 'active',
                    'bestModelId': None,
                    'bestErrScore': None}],

        # The list of encoders we have "blacklisted" because they
        # performed so poorly.
        blackListedEncoders = [],
        )

      # This will do nothing if the value of engWorkerState is not still None.
      self._hsObj._cjDAO.jobSetFieldIfEqual(
          self._hsObj._jobID, 'engWorkerState', json.dumps(self._state), None)

      # Re-read: if another worker won the race above, we adopt its state.
      self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(
          self._hsObj._jobID, ['engWorkerState'])[0]
      assert (self._priorStateJSON is not None)

    # Read state from the database
    self._state = json.loads(self._priorStateJSON)
    self._dirty = False
def writeStateToDB(self):
"""Update the state in the job record with our local changes (if any).
If we don't have the latest state in our priorStateJSON, then re-load
in the latest state and return False. If we were successful writing out
our changes, return True
Parameters:
---------------------------------------------------------------------
retval: True if we were successful writing out our changes
False if our priorState is not the latest that was in the DB.
In this case, we will re-load our state from the DB
"""
# If no changes, do nothing
if not self._dirty:
return True
# Set the update time
self._state['lastUpdateTime'] = time.time()
newStateJSON = json.dumps(self._state)
success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID,
'engWorkerState', str(newStateJSON), str(self._priorStateJSON))
if success:
self.logger.debug("Success changing hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = newStateJSON
# If no success, read in the current state from the DB
else:
self.logger.debug("Failed to change hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
self._state = json.loads(self._priorStateJSON)
self.logger.info("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._state, indent=4)))
return success
def getEncoderNameFromKey(self, key):
""" Given an encoder dictionary key, get the encoder name.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return key.split('|')[-1]
def getEncoderKeyFromName(self, name):
""" Given an encoder name, get the key.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return 'modelParams|sensorParams|encoders|%s' % (name)
  def getFieldContributions(self):
    """Return the field contributions statistics.

    Contribution is measured per single field as the percent improvement of
    that field's best error score relative to a base score; how the base is
    chosen depends on the search type (see inline comments below).

    Parameters:
    ---------------------------------------------------------------------
    retval:       Two dictionaries (pct, abs) where the keys are the field
                  names and the values are how much each field contributed
                  to the best score (percent better / absolute difference).
    """
    #in the fast swarm, there is only 1 sprint and field contributions are
    #not defined
    if self._hsObj._fixedFields is not None:
      return dict(), dict()

    # Get the predicted field encoder name
    # NOTE(review): predictedEncoderName is assigned but never used below.
    predictedEncoderName = self._hsObj._predictedFieldEncoder

    # -----------------------------------------------------------------------
    # Collect all the single field scores (swarm ids with exactly one encoder)
    fieldScores = []
    for swarmId, info in self._state['swarms'].iteritems():
      encodersUsed = swarmId.split('.')
      if len(encodersUsed) != 1:
        continue
      field = self.getEncoderNameFromKey(encodersUsed[0])
      bestScore = info['bestErrScore']

      # If the bestScore is None, this swarm hasn't completed yet (this could
      # happen if we're exiting because of maxModels), so look up the best
      # score so far
      if bestScore is None:
        (_modelId, bestScore) = \
            self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)

      fieldScores.append((bestScore, field))

    # -----------------------------------------------------------------------
    # If we only have 1 field that was tried in the first sprint, then use that
    # as the base and get the contributions from the fields in the next sprint.
    if self._hsObj._searchType == HsSearchType.legacyTemporal:
      assert(len(fieldScores)==1)
      (baseErrScore, baseField) = fieldScores[0]

      # Two-encoder swarms pair the base (predicted) field with one other
      # field; that other field's score is its contribution entry.
      for swarmId, info in self._state['swarms'].iteritems():
        encodersUsed = swarmId.split('.')
        if len(encodersUsed) != 2:
          continue

        fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]
        fields.remove(baseField)

        fieldScores.append((info['bestErrScore'], fields[0]))

    # The first sprint tried a bunch of fields, pick the worst performing one
    # (within the top self._hsObj._maxBranching ones) as the base
    else:
      # Descending sort: highest (worst) error score first.
      fieldScores.sort(reverse=True)

      # If maxBranching was specified, pick the worst performing field within
      # the top maxBranching+1 fields as our base, which will give that field
      # a contribution of 0.
      if self._hsObj._maxBranching > 0 \
          and len(fieldScores) > self._hsObj._maxBranching:
        baseErrScore = fieldScores[-self._hsObj._maxBranching-1][0]
      else:
        baseErrScore = fieldScores[0][0]

    # -----------------------------------------------------------------------
    # Prepare and return the fieldContributions dict
    pctFieldContributionsDict = dict()
    absFieldContributionsDict = dict()

    # If we have no base score, can't compute field contributions. This can
    # happen when we exit early due to maxModels or being cancelled
    if baseErrScore is not None:

      # If the base error score is 0, we can't compute a percent difference
      # off of it, so move it to a very small float
      if abs(baseErrScore) < 0.00001:
        baseErrScore = 0.00001
      for (errScore, field) in fieldScores:
        if errScore is not None:
          pctBetter = (baseErrScore - errScore) * 100.0 / baseErrScore
        else:
          # No score at all: report zero contribution (both pct and abs).
          pctBetter = 0.0
          errScore = baseErrScore   # for absFieldContribution

        pctFieldContributionsDict[field] = pctBetter
        absFieldContributionsDict[field] = baseErrScore - errScore

    self.logger.debug("FieldContributions: %s" % (pctFieldContributionsDict))
    return pctFieldContributionsDict, absFieldContributionsDict
def getAllSwarms(self, sprintIdx):
"""Return the list of all swarms in the given sprint.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx:
swarmIds.append(swarmId)
return swarmIds
def getActiveSwarms(self, sprintIdx=None):
"""Return the list of active swarms in the given sprint. These are swarms
which still need new particles created in them.
Parameters:
---------------------------------------------------------------------
sprintIdx: which sprint to query. If None, get active swarms from all
sprints
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if sprintIdx is not None and info['sprintIdx'] != sprintIdx:
continue
if info['status'] == 'active':
swarmIds.append(swarmId)
return swarmIds
def getNonKilledSwarms(self, sprintIdx):
"""Return the list of swarms in the given sprint that were not killed.
This is called when we are trying to figure out which encoders to carry
forward to the next sprint. We don't want to carry forward encoder
combintations which were obviously bad (in killed swarms).
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx and info['status'] != 'killed':
swarmIds.append(swarmId)
return swarmIds
def getCompletedSwarms(self):
"""Return the list of all completed swarms.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completed':
swarmIds.append(swarmId)
return swarmIds
def getCompletingSwarms(self):
"""Return the list of all completing swarms.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completing':
swarmIds.append(swarmId)
return swarmIds
def bestModelInCompletedSwarm(self, swarmId):
"""Return the best model ID and it's errScore from the given swarm.
If the swarm has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
swarmInfo = self._state['swarms'][swarmId]
return (swarmInfo['bestModelId'],
swarmInfo['bestErrScore'])
def bestModelInCompletedSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint.
If the sprint has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
sprintInfo = self._state['sprints'][sprintIdx]
return (sprintInfo['bestModelId'],
sprintInfo['bestErrScore'])
def bestModelInSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint,
which may still be in progress. This returns the best score from all models
in the sprint which have matured so far.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
# Get all the swarms in this sprint
swarms = self.getAllSwarms(sprintIdx)
# Get the best model and score from each swarm
bestModelId = None
bestErrScore = numpy.inf
for swarmId in swarms:
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
if errScore < bestErrScore:
bestModelId = modelId
bestErrScore = errScore
return (bestModelId, bestErrScore)
  def setSwarmState(self, swarmId, newStatus):
    """Change the given swarm's state to 'newState'. If 'newState' is
    'completed', then bestModelId and bestErrScore must be provided.

    Also rolls the change up into the owning sprint: recomputes the sprint's
    status from the statuses of its swarms, records the sprint's best model
    when it completes, and -- if the completed sprint did no better than an
    earlier one -- records lastGoodSprint and possibly marks the whole
    search as over.

    Parameters:
    ---------------------------------------------------------------------
    swarmId:      swarm Id
    newStatus:    new status, either 'active', 'completing', 'completed', or
                    'killed'
    """
    assert (newStatus in ['active', 'completing', 'completed', 'killed'])

    # Set the swarm status
    swarmInfo = self._state['swarms'][swarmId]
    if swarmInfo['status'] == newStatus:
      return

    # If some other worker noticed it as completed, setting it to completing
    # is obviously old information....
    if swarmInfo['status'] == 'completed' and newStatus == 'completing':
      return

    self._dirty = True
    swarmInfo['status'] = newStatus
    if newStatus == 'completed':
      # Record the winning model for this swarm from the results DB.
      (modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
      swarmInfo['bestModelId'] = modelId
      swarmInfo['bestErrScore'] = errScore

    # If no longer active, remove it from the activeSwarms entry
    if newStatus != 'active' and swarmId in self._state['activeSwarms']:
      self._state['activeSwarms'].remove(swarmId)

    # If new status is 'killed', kill off any running particles in that swarm
    if newStatus=='killed':
      self._hsObj.killSwarmParticles(swarmId)

    # In case speculative particles are enabled, make sure we generate a new
    # swarm at this time if all of the swarms in the current sprint have
    # completed. This will insure that we don't mark the sprint as completed
    # before we've created all the possible swarms.
    sprintIdx = swarmInfo['sprintIdx']
    self.isSprintActive(sprintIdx)

    # Update the sprint status. Check all the swarms that belong to this sprint.
    # If they are all completed, the sprint is completed.
    sprintInfo = self._state['sprints'][sprintIdx]

    # Tally swarm statuses within this sprint and collect the best model of
    # each completed swarm.
    statusCounts = dict(active=0, completing=0, completed=0, killed=0)
    bestModelIds = []
    bestErrScores = []
    for info in self._state['swarms'].itervalues():
      if info['sprintIdx'] != sprintIdx:
        continue
      statusCounts[info['status']] += 1
      if info['status'] == 'completed':
        bestModelIds.append(info['bestModelId'])
        bestErrScores.append(info['bestErrScore'])

    # Sprint status precedence: any active swarm keeps the sprint active;
    # otherwise any completing swarm keeps it completing; otherwise done.
    if statusCounts['active'] > 0:
      sprintStatus = 'active'
    elif statusCounts['completing'] > 0:
      sprintStatus = 'completing'
    else:
      sprintStatus = 'completed'
    sprintInfo['status'] = sprintStatus

    # If the sprint is complete, get the best model from all of its swarms and
    # store that as the sprint best
    if sprintStatus == 'completed':
      if len(bestErrScores) > 0:
        whichIdx = numpy.array(bestErrScores).argmin()
        sprintInfo['bestModelId'] = bestModelIds[whichIdx]
        sprintInfo['bestErrScore'] = bestErrScores[whichIdx]
      else:
        # This sprint was empty, most likely because all particles were
        # killed. Give it a huge error score
        sprintInfo['bestModelId'] = 0
        sprintInfo['bestErrScore'] = numpy.inf

      # See if our best err score got NO BETTER as compared to a previous
      # sprint. If so, stop exploring subsequent sprints (lastGoodSprint
      # is no longer None).
      bestPrior = numpy.inf
      for idx in range(sprintIdx):
        if self._state['sprints'][idx]['status'] == 'completed':
          (_, errScore) = self.bestModelInCompletedSprint(idx)
          if errScore is None:
            errScore = numpy.inf
        else:
          # Incomplete prior sprints don't count toward the prior best.
          errScore = numpy.inf
        if errScore < bestPrior:
          bestPrior = errScore

      if sprintInfo['bestErrScore'] >= bestPrior:
        self._state['lastGoodSprint'] = sprintIdx-1

      # If ALL sprints up to the last good one are done, the search is now over
      if self._state['lastGoodSprint'] is not None \
          and not self.anyGoodSprintsActive():
        self._state['searchOver'] = True
def anyGoodSprintsActive(self):
"""Return True if there are any more good sprints still being explored.
A 'good' sprint is one that is earlier than where we detected an increase
in error from sprint to subsequent sprint.
"""
if self._state['lastGoodSprint'] is not None:
goodSprints = self._state['sprints'][0:self._state['lastGoodSprint']+1]
else:
goodSprints = self._state['sprints']
for sprint in goodSprints:
if sprint['status'] == 'active':
anyActiveSprints = True
break
else:
anyActiveSprints = False
return anyActiveSprints
def isSprintCompleted(self, sprintIdx):
"""Return True if the given sprint has completed."""
numExistingSprints = len(self._state['sprints'])
if sprintIdx >= numExistingSprints:
return False
return (self._state['sprints'][sprintIdx]['status'] == 'completed')
  def killUselessSwarms(self):
    """See if we can kill off some speculative swarms. If an earlier sprint
    has finally completed, we can now tell which fields should *really* be present
    in the sprints we've already started due to speculation, and kill off the
    swarms that should not have been included.
    """
    # Get number of existing sprints
    numExistingSprints = len(self._state['sprints'])

    # Should we bother killing useless swarms? (Nothing to prune until at
    # least one multi-field sprint exists; legacyTemporal starts one later.)
    if self._hsObj._searchType == HsSearchType.legacyTemporal:
      if numExistingSprints <= 2:
        return
    else:
      if numExistingSprints <= 1:
        return

    # Form completedSwarms as a list of tuples, each tuple contains:
    #  (swarmName, swarmState, swarmBestErrScore)
    # ex. completedSwarms:
    #    [('a', {...}, 1.4),
    #     ('b', {...}, 2.0),
    #     ('c', {...}, 3.0)]
    completedSwarms = self.getCompletedSwarms()
    completedSwarms = [(swarm, self._state["swarms"][swarm],
                        self._state["swarms"][swarm]["bestErrScore"]) \
                                                for swarm in completedSwarms]

    # Form the completedMatrix. Each row corresponds to a sprint. Each row
    #  contains the list of swarm tuples that belong to that sprint, sorted
    #  by best score. Each swarm tuple contains (swarmName, swarmState,
    #  swarmBestErrScore).
    # ex. completedMatrix:
    #    [(('a', {...}, 1.4), ('b', {...}, 2.0), ('c', {...}, 3.0)),
    #     (('a.b', {...}, 3.0), ('b.c', {...}, 4.0))]
    completedMatrix = [[] for i in range(numExistingSprints)]
    for swarm in completedSwarms:
      completedMatrix[swarm[1]["sprintIdx"]].append(swarm)
    for sprint in completedMatrix:
      sprint.sort(key=itemgetter(2))

    # Form activeSwarms as a list of tuples, each tuple contains:
    #  (swarmName, swarmState, swarmBestErrScore)
    # Include all activeSwarms and completingSwarms
    # ex. activeSwarms:
    #    [('d', {...}, 1.4),
    #     ('e', {...}, 2.0),
    #     ('f', {...}, 3.0)]
    activeSwarms = self.getActiveSwarms()
    # Append the completing swarms
    activeSwarms.extend(self.getCompletingSwarms())
    activeSwarms = [(swarm, self._state["swarms"][swarm],
                     self._state["swarms"][swarm]["bestErrScore"]) \
                                                for swarm in activeSwarms]

    # Form the activeMatrix. Each row corresponds to a sprint. Each row
    #  contains the list of swarm tuples that belong to that sprint, sorted
    #  by best score. Each swarm tuple contains (swarmName, swarmState,
    #  swarmBestErrScore)
    # ex. activeMatrix:
    #    [(('d', {...}, 1.4), ('e', {...}, 2.0), ('f', {...}, 3.0)),
    #     (('d.e', {...}, 3.0), ('e.f', {...}, 4.0))]
    activeMatrix = [[] for i in range(numExistingSprints)]
    for swarm in activeSwarms:
      activeMatrix[swarm[1]["sprintIdx"]].append(swarm)
    for sprint in activeMatrix:
      sprint.sort(key=itemgetter(2))

    # Figure out which active swarms to kill
    toKill = []
    for i in range(1, numExistingSprints):
      for swarm in activeMatrix[i]:
        curSwarmEncoders = swarm[0].split(".")

        # If previous sprint is complete, get the best swarm and kill all active
        #  sprints that are not supersets
        # NOTE(review): the test below checks that the previous sprint has no
        # ACTIVE swarms (activeMatrix) but then indexes completedMatrix[i-1][0]
        # -- this assumes the previous sprint has at least one completed
        # swarm; confirm that can't be empty here.
        if(len(activeMatrix[i-1])==0):
          # If we are trying all possible 3 field combinations, don't kill any
          #  off in sprint 2
          if i==2 and (self._hsObj._tryAll3FieldCombinations or \
                self._hsObj._tryAll3FieldCombinationsWTimestamps):
            pass
          else:
            bestInPrevious = completedMatrix[i-1][0]
            bestEncoders = bestInPrevious[0].split('.')
            for encoder in bestEncoders:
              if not encoder in curSwarmEncoders:
                toKill.append(swarm)

        # if there are more than two completed encoders sets that are complete and
        #  are worse than at least one active swarm in the previous sprint. Remove
        #  any combinations that have any pair of them since they cannot have the best encoder.
        #elif(len(completedMatrix[i-1])>1):
        #  for completedSwarm in completedMatrix[i-1]:
        #    activeMatrix[i-1][0][2]<completed

    # Mark the bad swarms as killed
    if len(toKill) > 0:
      # "ParseMe" suggests this line is scraped from worker logs downstream.
      print "ParseMe: Killing encoders:" + str(toKill)

    for swarm in toKill:
      self.setSwarmState(swarm[0], "killed")

    return
  def isSprintActive(self, sprintIdx):
    """If the given sprint exists and is active, return active=True.

    If the sprint does not exist yet, this call will create it (and return
    active=True). If it already exists, but is completing or complete, return
    active=False.

    If sprintIdx is past the end of the possible sprints, return
      active=False, noMoreSprints=True

    IMPORTANT: When speculative particles are enabled, this call has some
    special processing to handle speculative sprints:

      * When creating a new speculative sprint (creating sprint N before
        sprint N-1 has completed), it initially only puts in only ONE swarm into
        the sprint.

      * Every time it is asked if sprint N is active, it also checks to see if
        it is time to add another swarm to the sprint, and adds a new swarm if
        appropriate before returning active=True

      * We decide it is time to add a new swarm to a speculative sprint when ALL
        of the currently active swarms in the sprint have all the workers they
        need (number of running (not mature) particles is _minParticlesPerSwarm).
        This means that we have capacity to run additional particles in a new
        swarm.

    It is expected that the sprints will be checked IN ORDER from 0 on up. (It
    is an error not to) The caller should always try to allocate from the first
    active sprint it finds. If it can't, then it can call this again to
    find/create the next active sprint.

    Parameters:
    ---------------------------------------------------------------------
    retval:   (active, noMoreSprints)
                active: True if the given sprint is active
                noMoreSprints: True if there are no more sprints possible
    """
    # Loop until our compare-and-swap write of the new sprint/swarms succeeds
    # (another worker may win the race, in which case we retry on its state).
    while True:
      numExistingSprints = len(self._state['sprints'])

      # If this sprint already exists, see if it is active
      if sprintIdx <= numExistingSprints-1:

        # With speculation off, it's simple, just return whether or not the
        # asked for sprint has active status
        if not self._hsObj._speculativeParticles:
          active = (self._state['sprints'][sprintIdx]['status'] == 'active')
          return (active, False)

        # With speculation on, if the sprint is still marked active, we also
        # need to see if it's time to add a new swarm to it.
        else:
          active = (self._state['sprints'][sprintIdx]['status'] == 'active')
          if not active:
            return (active, False)

          # See if all of the existing swarms are at capacity (have all the
          # workers they need):
          activeSwarmIds = self.getActiveSwarms(sprintIdx)
          swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId,
                              matured=False)[0] for swarmId in activeSwarmIds]
          notFullSwarms = [len(swarm) for swarm in swarmSizes \
                           if len(swarm) < self._hsObj._minParticlesPerSwarm]

          # If some swarms have room return that the swarm is active.
          if len(notFullSwarms) > 0:
            return (True, False)

          # If the existing swarms are at capacity, we will fall through to the
          # logic below which tries to add a new swarm to the sprint.

      # Stop creating new sprints?
      if self._state['lastGoodSprint'] is not None:
        return (False, True)

      # if fixedFields is set, we are running a fast swarm and only run sprint0
      if self._hsObj._fixedFields is not None:
        return (False, True)

      # ----------------------------------------------------------------------
      # Get the best model (if there is one) from the prior sprint. That gives
      # us the base encoder set for the next sprint. For sprint zero make sure
      # it does not take the last sprintidx because of wrapping.
      if sprintIdx > 0  \
            and self._state['sprints'][sprintIdx-1]['status'] == 'completed':
        (bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx-1)
        (particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(
                                                                  bestModelId)
        bestSwarmId = particleState['swarmId']
        baseEncoderSets = [bestSwarmId.split('.')]

      # If there is no best model yet, then use all encoder sets from the prior
      # sprint that were not killed
      else:
        bestSwarmId = None
        particleState = None
        # Build up more combinations, using ALL of the sets in the current
        # sprint.
        baseEncoderSets = []
        for swarmId in self.getNonKilledSwarms(sprintIdx-1):
          baseEncoderSets.append(swarmId.split('.'))

      # ----------------------------------------------------------------------
      # Which encoders should we add to the current base set?
      encoderAddSet = []

      # If we have constraints on how many fields we carry forward into
      # subsequent sprints (either nupic.hypersearch.max.field.branching or
      # nupic.hypersearch.min.field.contribution was set), then be more
      # picky about which fields we add in.
      limitFields = False
      if self._hsObj._maxBranching > 0 \
            or self._hsObj._minFieldContribution >= 0:
        if self._hsObj._searchType == HsSearchType.temporal or \
            self._hsObj._searchType == HsSearchType.classification:
          if sprintIdx >= 1:
            limitFields = True
            baseSprintIdx = 0
        elif self._hsObj._searchType == HsSearchType.legacyTemporal:
          if sprintIdx >= 2:
            limitFields = True
            baseSprintIdx = 1
        else:
          raise RuntimeError("Unimplemented search type %s" % \
                                  (self._hsObj._searchType))

      # Only add top _maxBranching encoders to the swarms?
      if limitFields:

        # Get field contributions to filter added fields
        pctFieldContributions, absFieldContributions = \
                                                self.getFieldContributions()
        toRemove = []
        self.logger.debug("FieldContributions min: %s" % \
                          (self._hsObj._minFieldContribution))
        for fieldname in pctFieldContributions:
          if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:
            self.logger.debug("FieldContributions removing: %s" % (fieldname))
            toRemove.append(self.getEncoderKeyFromName(fieldname))
          else:
            self.logger.debug("FieldContributions keeping: %s" % (fieldname))

        # Grab the top maxBranching base sprint swarms.
        swarms = self._state["swarms"]
        sprintSwarms = [(swarm, swarms[swarm]["bestErrScore"]) \
            for swarm in swarms if swarms[swarm]["sprintIdx"] == baseSprintIdx]
        sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
        if self._hsObj._maxBranching > 0:
          sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching]

        # Create encoder set to generate further swarms.
        for swarm in sprintSwarms:
          swarmEncoders = swarm[0].split(".")
          for encoder in swarmEncoders:
            if not encoder in encoderAddSet:
              encoderAddSet.append(encoder)
        encoderAddSet = [encoder for encoder in encoderAddSet \
                         if not str(encoder) in toRemove]

      # If no limit on the branching or min contribution, simply use all of the
      # encoders.
      else:
        encoderAddSet = self._hsObj._encoderNames

      # -----------------------------------------------------------------------
      # Build up the new encoder combinations for the next sprint.
      newSwarmIds = set()

      # See if the caller wants to try more extensive field combinations with
      # 3 fields.
      if (self._hsObj._searchType == HsSearchType.temporal \
           or self._hsObj._searchType == HsSearchType.legacyTemporal) \
          and sprintIdx == 2 \
          and (self._hsObj._tryAll3FieldCombinations or \
               self._hsObj._tryAll3FieldCombinationsWTimestamps):

        if self._hsObj._tryAll3FieldCombinations:
          newEncoders = set(self._hsObj._encoderNames)
          if self._hsObj._predictedFieldEncoder in newEncoders:
            newEncoders.remove(self._hsObj._predictedFieldEncoder)
        else:
          # Just make sure the timestamp encoders are part of the mix
          newEncoders = set(encoderAddSet)
          if self._hsObj._predictedFieldEncoder in newEncoders:
            newEncoders.remove(self._hsObj._predictedFieldEncoder)
          for encoder in self._hsObj._encoderNames:
            if encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') \
                or encoder.endswith('_dayOfWeek'):
              newEncoders.add(encoder)

        # Each new swarm is (2 chosen encoders) + the predicted field encoder.
        allCombos = list(itertools.combinations(newEncoders, 2))
        for combo in allCombos:
          newSet = list(combo)
          newSet.append(self._hsObj._predictedFieldEncoder)
          newSet.sort()
          newSwarmId = '.'.join(newSet)
          if newSwarmId not in self._state['swarms']:
            newSwarmIds.add(newSwarmId)

            # If a speculative sprint, only add the first encoder, if not add
            # all of them.
            if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
              break

      # Else, we only build up by adding 1 new encoder to the best combination(s)
      # we've seen from the prior sprint
      else:
        for baseEncoderSet in baseEncoderSets:
          for encoder in encoderAddSet:
            if encoder not in self._state['blackListedEncoders'] \
                and encoder not in baseEncoderSet:
              newSet = list(baseEncoderSet)
              newSet.append(encoder)
              newSet.sort()
              newSwarmId = '.'.join(newSet)
              if newSwarmId not in self._state['swarms']:
                newSwarmIds.add(newSwarmId)

                # If a speculative sprint, only add the first encoder, if not add
                # all of them.
                if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
                  break

      # ----------------------------------------------------------------------
      # Sort the new swarm Ids
      newSwarmIds = sorted(newSwarmIds)

      # If no more swarms can be found for this sprint...
      if len(newSwarmIds) == 0:
        # if sprint is not an empty sprint return that it is active but do not
        # add anything to it.
        if len(self.getAllSwarms(sprintIdx)) > 0:
          return (True, False)

        # If this is an empty sprint and we couldn't find any new swarms to
        # add (only bad fields are remaining), the search is over
        else:
          return (False, True)

      # Add this sprint and the swarms that are in it to our state
      self._dirty = True

      # Add in the new sprint if necessary
      if len(self._state["sprints"]) == sprintIdx:
        self._state['sprints'].append({'status': 'active',
                                       'bestModelId': None,
                                       'bestErrScore': None})

      # Add in the new swarm(s) to the sprint
      for swarmId in newSwarmIds:
        self._state['swarms'][swarmId] = {'status': 'active',
                                          'bestModelId': None,
                                          'bestErrScore': None,
                                          'sprintIdx': sprintIdx}

      # Update the list of active swarms
      self._state['activeSwarms'] = self.getActiveSwarms()

      # Try to set new state
      success = self.writeStateToDB()

      # Return result if successful
      if success:
        return (True, False)

      # No success, loop back with the updated state and try again
class HsSearchType(object):
  """Enumerates the kinds of hypersearch we can run.

  The values are the string constants stored in (and compared against) the
  shared HsState job-record state.
  """
  # Temporal prediction search; predicted field need not be in sprint 0.
  temporal = 'temporal'
  # Classification search; predicted field is excluded from sprint 0.
  classification = 'classification'
  # Older temporal search where sprint 0 tries the predicted field only.
  legacyTemporal = 'legacyTemporal'
| 40,745 | Python | .py | 851 | 38.854289 | 94 | 0.611271 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,032 | support.py | numenta_nupic-legacy/src/nupic/swarming/hypersearch/support.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file contains support code for the hypersearch library.
Most of it is temporarily copied from nupic.support.
"""
from __future__ import with_statement
from copy import copy
import errno
import logging
import os
import sys
import traceback
from xml.etree import ElementTree
from pkg_resources import resource_string
import re
import keyword
import functools
### Enum
# Pattern for a legal Python identifier; compiled once at module load so the
# per-call cost is just a match.
__IDENTIFIER_PATTERN = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
def __isidentifier(s):
  """Return True iff s is usable as a Python identifier and is not a keyword."""
  return (s not in keyword.kwlist
          and __IDENTIFIER_PATTERN.match(s) is not None)
def Enum(*args, **kwargs):
  """
  Utility function for creating enumerations in python.

  Each positional arg becomes an enum member whose *value is the member's own
  name* (the numeric-value mapping below is commented out); each keyword arg
  becomes a member with the given explicit value.

  Example Usage:
  >> Color = Enum("Red", "Green", "Blue", Violet='violet')
  >> Color.Red
  >> 'Red'
  >> Color.Green
  >> 'Green'
  >> Color.Violet
  >> 'violet'
  >> Color.getLabel(Color.Red)
  >> 'Red'
  >> Color.getLabel('violet')
  >> 'Violet'
  """
  def getLabel(cls, val):
    """ Get a string label for the current value of the enum """
    return cls.__labels[val]
  def validate(cls, val):
    """ Returns True if val is a valid value for the enumeration """
    return val in cls.__values
  def getValues(cls):
    """ Returns a list of all the possible values for this enum """
    return list(cls.__values)
  def getLabels(cls):
    """ Returns a list of all possible labels for this enum """
    return list(cls.__labels.values())
  def getValue(cls, label):
    """ Returns value given a label """
    return cls.__labels[label]
  # Validate that every member name is a legal, non-keyword identifier
  for arg in list(args)+kwargs.keys():
    if type(arg) is not str:
      raise TypeError("Enum arg {0} must be a string".format(arg))
    if not __isidentifier(arg):
      raise ValueError("Invalid enum value '{0}'. "\
                       "'{0}' is not a valid identifier".format(arg))
  #kwargs.update(zip(args, range(len(args))))
  # Positional members map to themselves (name == value)
  kwargs.update(zip(args, args))
  newType = type("Enum", (object,), kwargs)
  # __labels maps each member's value back to its name (label)
  newType.__labels = dict( (v,k) for k,v in kwargs.iteritems())
  newType.__values = set(newType.__labels.keys())
  # Bind the helpers with the new type pre-applied as 'cls'
  newType.getLabel = functools.partial(getLabel, newType)
  newType.validate = functools.partial(validate, newType)
  newType.getValues = functools.partial(getValues, newType)
  newType.getLabels = functools.partial(getLabels, newType)
  newType.getValue = functools.partial(getValue, newType)
  return newType
def makeDirectoryFromAbsolutePath(absDirPath):
  """ Makes directory for the given directory path with default permissions.
  If the directory already exists, it is treated as success.

  absDirPath: absolute path of the directory to create.

  Returns: absDirPath arg

  Exceptions: OSError if directory creation fails for any reason other than
              the directory already existing
  """
  assert os.path.isabs(absDirPath)
  try:
    os.makedirs(absDirPath)
  except OSError as e:
    # FIX: use the errno module directly (consistent with the rest of this
    # file); os.errno is a CPython implementation detail that is not
    # guaranteed to exist.
    if e.errno != errno.EEXIST:
      raise
  return absDirPath
# Turn on additional print statements
DEBUG = False
# Base configuration shipped with nupic (always read first)
DEFAULT_CONFIG = "nupic-default.xml"
# Site-specific overrides of the defaults
USER_CONFIG = "nupic-site.xml"
# Dynamically-written custom overrides (see _CustomConfigurationFileWrapper)
CUSTOM_CONFIG = "nupic-custom.xml"
def _getLoggerBase():
  # Logger used by ConfigurationBase; DEBUG bumps it to debug-level output
  logger = logging.getLogger("com.numenta.nupic.tools.configuration_base")
  if DEBUG:
    logger.setLevel(logging.DEBUG)
  return logger
class ConfigurationBase(object):
  """ This class can be used to fetch NuPic configuration settings which are
  stored in one or more XML files.

  If the environment variable 'NTA_CONF_PATH' is defined, then the configuration
  files are expected to be in the NTA_CONF_PATH search path, which is a ':'
  separated list of directories (on Windows the seperator is a ';').
  If NTA_CONF_PATH is not defined, then it is loaded via pkg_resources.

  Properties are cached in a class-level dict on first access; environment
  variables prefixed with envPropPrefix override values from the XML files.
  """
  # Once we read in the properties, they are stored in this dict
  _properties = None
  # This stores the paths we search for config files. It can be modified through
  # the setConfigPaths() method.
  _configPaths = None
  # Any environment variable prefixed with this string serves as an override
  # to property defined in the current configuration
  envPropPrefix = 'NTA_CONF_PROP_'
  @classmethod
  def getString(cls, prop):
    """ Retrieve the requested property as a string. If property does not exist,
    then KeyError will be raised.
    Parameters:
    ----------------------------------------------------------------
    prop: name of the property
    retval: property value as a string
    """
    if cls._properties is None:
      cls._readStdConfigFiles()
    # Allow configuration properties to be overridden via environment variables
    # (dots in the property name map to underscores in the variable name)
    envValue = os.environ.get("%s%s" % (cls.envPropPrefix,
                                        prop.replace('.', '_')), None)
    if envValue is not None:
      return envValue
    return cls._properties[prop]
  @classmethod
  def getBool(cls, prop):
    """ Retrieve the requested property and return it as a bool. If property
    does not exist, then KeyError will be raised. If the property value is
    neither 0 nor 1, then ValueError will be raised
    Parameters:
    ----------------------------------------------------------------
    prop: name of the property
    retval: property value as bool
    """
    value = cls.getInt(prop)
    if value not in (0, 1):
      raise ValueError("Expected 0 or 1, but got %r in config property %s" % (
          value, prop))
    return bool(value)
  @classmethod
  def getInt(cls, prop):
    """ Retrieve the requested property and return it as an int. If property
    does not exist, then KeyError will be raised.
    Parameters:
    ----------------------------------------------------------------
    prop: name of the property
    retval: property value as int
    """
    return int(cls.getString(prop))
  @classmethod
  def getFloat(cls, prop):
    """ Retrieve the requested property and return it as a float. If property
    does not exist, then KeyError will be raised.
    Parameters:
    ----------------------------------------------------------------
    prop: name of the property
    retval: property value as float
    """
    return float(cls.getString(prop))
  @classmethod
  def get(cls, prop, default=None):
    """ Get the value of the given configuration property as string. This
    returns a string which is the property value, or the value of "default" arg
    if the property is not found. Use Configuration.getString() instead.
    NOTE: it's atypical for our configuration properties to be missing - a
    missing configuration property is usually a very serious error. Because
    of this, it's preferable to use one of the getString, getInt, getFloat,
    etc. variants instead of get(). Those variants will raise KeyError when
    an expected property is missing.
    Parameters:
    ----------------------------------------------------------------
    prop: name of the property
    default: default value to return if property does not exist
    retval: property value (as a string), or default if the property does
             not exist.
    """
    try:
      return cls.getString(prop)
    except KeyError:
      return default
  @classmethod
  def set(cls, prop, value):
    """ Set the value of the given configuration property (in-memory only;
    not persisted to any file).
    Parameters:
    ----------------------------------------------------------------
    prop: name of the property
    value: value to set
    """
    if cls._properties is None:
      cls._readStdConfigFiles()
    cls._properties[prop] = str(value)
  @classmethod
  def dict(cls):
    """ Return a dict containing all of the configuration properties
    Parameters:
    ----------------------------------------------------------------
    retval: dict containing all configuration properties.
    """
    if cls._properties is None:
      cls._readStdConfigFiles()
    # Make a copy so we can update any current values obtained from environment
    # variables
    result = dict(cls._properties)
    keys = os.environ.keys()
    replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix),
                         keys)
    for envKey in replaceKeys:
      # Reverse the name mapping used by getString: strip the prefix and map
      # underscores back to dots
      key = envKey[len(cls.envPropPrefix):]
      key = key.replace('_', '.')
      result[key] = os.environ[envKey]
    return result
  @classmethod
  def readConfigFile(cls, filename, path=None):
    """ Parse the given XML file and store all properties it describes.
    Parameters:
    ----------------------------------------------------------------
    filename: name of XML file to parse (no path)
    path: path of the XML file. If None, then use the standard
          configuration search path.
    """
    properties = cls._readConfigFile(filename, path)
    # Create properties dict if necessary
    if cls._properties is None:
      cls._properties = dict()
    for name in properties:
      if 'value' in properties[name]:
        cls._properties[name] = properties[name]['value']
  @classmethod
  def _readConfigFile(cls, filename, path=None):
    """ Parse the given XML file and return a dict describing the file.
    Parameters:
    ----------------------------------------------------------------
    filename: name of XML file to parse (no path)
    path: path of the XML file. If None, then use the standard
          configuration search path.
    retval: returns a dict with each property as a key and a dict of all
            the property's attributes as value
    """
    outputProperties = dict()
    # Get the path to the config files.
    if path is None:
      filePath = cls.findConfigFile(filename)
    else:
      filePath = os.path.join(path, filename)
    # ------------------------------------------------------------------
    # Read in the config file
    try:
      if filePath is not None:
        try:
          # Use warn since console log level is set to warning
          _getLoggerBase().debug("Loading config file: %s", filePath)
          with open(filePath, 'r') as inp:
            contents = inp.read()
        except Exception:
          raise RuntimeError("Expected configuration file at %s" % filePath)
      else:
        # If the file was not found in the normal search paths, which includes
        # checking the NTA_CONF_PATH, we'll try loading it from pkg_resources.
        try:
          contents = resource_string("nupic.support", filename)
        except Exception as resourceException:
          # We expect these to be read, and if they don't exist we'll just use
          # an empty configuration string.
          if filename in [USER_CONFIG, CUSTOM_CONFIG]:
            contents = '<configuration/>'
          else:
            raise resourceException
      elements = ElementTree.XML(contents)
      if elements.tag != 'configuration':
        raise RuntimeError("Expected top-level element to be 'configuration' "
                           "but got '%s'" % (elements.tag))
      # ------------------------------------------------------------------
      # Add in each property found
      propertyElements = elements.findall('./property')
      for propertyItem in propertyElements:
        propInfo = dict()
        # Parse this property element: child tags (name/value/novalue/...)
        # become keys of propInfo
        propertyAttributes = list(propertyItem)
        for propertyAttribute in propertyAttributes:
          propInfo[propertyAttribute.tag] = propertyAttribute.text
        # Get the name
        name = propInfo.get('name', None)
        # value is allowed to be empty string
        if 'value' in propInfo and propInfo['value'] is None:
          value = ''
        else:
          value = propInfo.get('value', None)
        if value is None:
          if 'novalue' in propInfo:
            # Placeholder "novalue" properties are intended to be overridden
            # via dynamic configuration or another configuration layer.
            continue
          else:
            raise RuntimeError("Missing 'value' element within the property "
                               "element: => %s " % (str(propInfo)))
        # The value is allowed to contain substitution tags of the form
        # ${env.VARNAME}, which should be substituted with the corresponding
        # environment variable values
        restOfValue = value
        value = ''
        while True:
          # Find the beginning of substitution tag
          pos = restOfValue.find('${env.')
          if pos == -1:
            # No more environment variable substitutions
            value += restOfValue
            break
          # Append prefix to value accumulator
          value += restOfValue[0:pos]
          # Find the end of current substitution tag
          varTailPos = restOfValue.find('}', pos)
          if varTailPos == -1:
            raise RuntimeError(
              "Trailing environment variable tag delimiter '}'"
              " not found in %r" % (restOfValue))
          # Extract environment variable name from tag
          # (pos + 6 skips past the '${env.' prefix)
          varname = restOfValue[pos + 6:varTailPos]
          if varname not in os.environ:
            raise RuntimeError("Attempting to use the value of the environment"
                               " variable %r, which is not defined" % (
                                 varname))
          envVarValue = os.environ[varname]
          value += envVarValue
          restOfValue = restOfValue[varTailPos + 1:]
        # Check for errors
        if name is None:
          raise RuntimeError(
            "Missing 'name' element within following property "
            "element:\n => %s " % (str(propInfo)))
        propInfo['value'] = value
        outputProperties[name] = propInfo
      return outputProperties
    except Exception:
      _getLoggerBase().exception("Error while parsing configuration file: %s.",
                                 filePath)
      raise
  @classmethod
  def clear(cls):
    """ Clear out the entire configuration (in-memory cache and the cached
    search paths); the next property access re-reads the config files.
    """
    cls._properties = None
    cls._configPaths = None
  @classmethod
  def findConfigFile(cls, filename):
    """ Search the configuration path (specified via the NTA_CONF_PATH
    environment variable) for the given filename. If found, return the complete
    path to the file.  Returns None (implicitly) if the file is not found.
    Parameters:
    ----------------------------------------------------------------
    filename: name of file to locate
    """
    paths = cls.getConfigPaths()
    for p in paths:
      testPath = os.path.join(p, filename)
      if os.path.isfile(testPath):
        return os.path.join(p, filename)
  @classmethod
  def getConfigPaths(cls):
    """ Return the list of paths to search for configuration files.
    Returns an empty list when no override is set and NTA_CONF_PATH is not
    defined (in which case pkg_resources is used instead - see
    _readConfigFile).
    Parameters:
    ----------------------------------------------------------------
    retval: list of paths.
    """
    configPaths = []
    if cls._configPaths is not None:
      return cls._configPaths
    else:
      if 'NTA_CONF_PATH' in os.environ:
        configVar = os.environ['NTA_CONF_PATH']
        # Return as a list of paths
        configPaths = configVar.split(os.pathsep)
      return configPaths
  @classmethod
  def setConfigPaths(cls, paths):
    """ Modify the paths we use to search for configuration files.
    Parameters:
    ----------------------------------------------------------------
    paths: list of paths to search for config files.
    """
    cls._configPaths = list(paths)
  @classmethod
  def _readStdConfigFiles(cls):
    """ Read in all standard configuration files
    """
    # Default one first
    cls.readConfigFile(DEFAULT_CONFIG)
    # Site specific one can override properties defined in default
    cls.readConfigFile(USER_CONFIG)
def _getLogger():
  """Return the logger used by the custom-configuration layer."""
  logger = logging.getLogger("com.numenta.nupic.tools.configuration_custom")
  return logger
class Configuration(ConfigurationBase):
  """ This class extends the ConfigurationBase implementation with the ability
  to read and write custom, persistent parameters. The custom settings will be
  stored in the nupic-custom.xml file.

  If the environment variable 'NTA_CONF_PATH' is defined, then the configuration
  files are expected to be in the NTA_CONF_PATH search path, which is a ':'
  separated list of directories (on Windows the seperator is a ';').
  If NTA_CONF_PATH is not defined, then it is assumed to be NTA/conf/default
  (typically ~/nupic/current/conf/default).
  """
  @classmethod
  def getCustomDict(cls):
    """ Return a dict containing all custom configuration properties
    Parameters:
    ----------------------------------------------------------------
    retval: dict containing all custom configuration properties.
    """
    return _CustomConfigurationFileWrapper.getCustomDict()
  @classmethod
  def setCustomProperty(cls, propertyName, value):
    """ Set a single custom setting and persist it to the custom
    configuration store.
    Parameters:
    ----------------------------------------------------------------
    propertyName: string containing the name of the property to set
    value: value to set the property to
    """
    cls.setCustomProperties({propertyName: value})
  @classmethod
  def setCustomProperties(cls, properties):
    """ Set multiple custom properties and persist them to the custom
    configuration store.
    Parameters:
    ----------------------------------------------------------------
    properties: a dict of property name/value pairs to set
    """
    _getLogger().info("Setting custom configuration properties=%r; caller=%r",
                      properties, traceback.format_stack())
    # Persist to the custom store first, then update the in-memory cache
    _CustomConfigurationFileWrapper.edit(properties)
    for propertyName, value in properties.iteritems():
      cls.set(propertyName, value)
  @classmethod
  def clear(cls):
    """ Clear all configuration properties from in-memory cache, but do NOT
    alter the custom configuration file. Used in unit-testing.
    """
    # Clear the in-memory settings cache, forcing reload upon subsequent "get"
    # request.
    super(Configuration, cls).clear()
    # Reset in-memory custom configuration info.
    _CustomConfigurationFileWrapper.clear(persistent=False)
  @classmethod
  def resetCustomConfig(cls):
    """ Clear all custom configuration settings and delete the persistent
    custom configuration store.
    """
    _getLogger().info("Resetting all custom configuration properties; "
                      "caller=%r", traceback.format_stack())
    # Clear the in-memory settings cache, forcing reload upon subsequent "get"
    # request.
    super(Configuration, cls).clear()
    # Delete the persistent custom configuration store and reset in-memory
    # custom configuration info
    _CustomConfigurationFileWrapper.clear(persistent=True)
  @classmethod
  def loadCustomConfig(cls):
    """ Loads custom configuration settings from their persistent storage.
    DO NOT CALL THIS: It's typically not necessary to call this method
    directly - see NOTE below.
    NOTE: this method exists *solely* for the benefit of prepare_conf.py, which
    needs to load configuration files selectively.
    """
    cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)
  @classmethod
  def _readStdConfigFiles(cls):
    """ Intercept the _readStdConfigFiles call from our base config class to
    read in base and custom configuration settings.
    """
    super(Configuration, cls)._readStdConfigFiles()
    # Custom settings are layered on last so they win over base/site settings
    cls.loadCustomConfig()
class _CustomConfigurationFileWrapper(object):
  """
  Private class to handle creation, deletion and editing of the custom
  configuration file used by this implementation of Configuration.

  Supports persistent changes to nupic-custom.xml configuration file.

  This class only applies changes to the local instance.
  For cluster wide changes see nupic-services.py or nupic.cluster.NupicServices
  """
  # Name of the custom xml file to be created
  customFileName = 'nupic-custom.xml'
  # Stores the path to the file
  # If none, findConfigFile is used to find path to file; defaults to
  # NTA_CONF_PATH[0]
  _path = None
  @classmethod
  def clear(cls, persistent=False):
    """ If persistent is True, delete the temporary file
    Parameters:
    ----------------------------------------------------------------
    persistent: if True, custom configuration file is deleted
    """
    if persistent:
      try:
        os.unlink(cls.getPath())
      except OSError, e:
        # A missing file is fine (nothing to delete); anything else is logged
        # and re-raised
        if e.errno != errno.ENOENT:
          _getLogger().exception("Error %s while trying to remove dynamic " \
                                 "configuration file: %s", e.errno,
                                 cls.getPath())
          raise
    # Forget the cached path so it is recomputed on next access
    cls._path = None
  @classmethod
  def getCustomDict(cls):
    """ Returns a dict of all temporary values in custom configuration file
    """
    if not os.path.exists(cls.getPath()):
      return dict()
    properties = Configuration._readConfigFile(os.path.basename(
      cls.getPath()), os.path.dirname(cls.getPath()))
    values = dict()
    for propName in properties:
      if 'value' in properties[propName]:
        values[propName] = properties[propName]['value']
    return values
  @classmethod
  def edit(cls, properties):
    """ Edits the XML configuration file with the parameters specified by
    properties
    Parameters:
    ----------------------------------------------------------------
    properties: dict of settings to be applied to the custom configuration store
                 (key is property name, value is value)
    """
    copyOfProperties = copy(properties)
    configFilePath = cls.getPath()
    try:
      with open(configFilePath, 'r') as fp:
        contents = fp.read()
    except IOError, e:
      # A missing file is expected on first edit; start from an empty document
      if e.errno != errno.ENOENT:
        _getLogger().exception("Error %s reading custom configuration store "
                               "from %s, while editing properties %s.",
                               e.errno, configFilePath, properties)
        raise
      contents = '<configuration/>'
    try:
      elements = ElementTree.XML(contents)
      # Round-trip through tostring() to force early detection of a
      # serialization problem before we mutate anything
      ElementTree.tostring(elements)
    except Exception, e:
      # Raising error as RuntimeError with custom message since ElementTree
      # exceptions aren't clear.
      msg = "File contents of custom configuration is corrupt. File " \
            "location: %s; Contents: '%s'. Original Error (%s): %s." % \
            (configFilePath, contents, type(e), e)
      _getLogger().exception(msg)
      raise RuntimeError(msg), None, sys.exc_info()[2]
    if elements.tag != 'configuration':
      e = "Expected top-level element to be 'configuration' but got '%s'" % \
          (elements.tag)
      _getLogger().error(e)
      raise RuntimeError(e)
    # Apply new properties to matching settings in the custom config store;
    # pop matching properties from our copy of the properties dict
    for propertyItem in elements.findall('./property'):
      propInfo = dict((attr.tag, attr.text) for attr in propertyItem)
      name = propInfo['name']
      if name in copyOfProperties:
        foundValues = propertyItem.findall('./value')
        if len(foundValues) > 0:
          foundValues[0].text = str(copyOfProperties.pop(name))
          if not copyOfProperties:
            break
        else:
          e = "Property %s missing value tag." % (name,)
          _getLogger().error(e)
          raise RuntimeError(e)
    # Add unmatched remaining properties to custom config store
    for propertyName, value in copyOfProperties.iteritems():
      newProp = ElementTree.Element('property')
      nameTag = ElementTree.Element('name')
      nameTag.text = propertyName
      newProp.append(nameTag)
      valueTag = ElementTree.Element('value')
      valueTag.text = str(value)
      newProp.append(valueTag)
      elements.append(newProp)
    try:
      makeDirectoryFromAbsolutePath(os.path.dirname(configFilePath))
      with open(configFilePath, 'w') as fp:
        fp.write(ElementTree.tostring(elements))
    except Exception, e:
      _getLogger().exception("Error while saving custom configuration "
                             "properties %s in %s.", properties,
                             configFilePath)
      raise
  @classmethod
  def _setPath(cls):
    """ Sets the path of the custom configuration file
    (requires the NTA_DYNAMIC_CONF_DIR environment variable to be set)
    """
    cls._path = os.path.join(os.environ['NTA_DYNAMIC_CONF_DIR'],
                             cls.customFileName)
  @classmethod
  def getPath(cls):
    """ Get the path of the custom configuration file
    """
    if cls._path is None:
      cls._setPath()
    return cls._path
| 25,502 | Python | .py | 625 | 34.3504 | 80 | 0.638317 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,033 | experiment_generator.py | numenta_nupic-legacy/src/nupic/swarming/exp_generator/experiment_generator.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This utility can generate an OPF experiment and permutation script based on
a data file and other optional arguments.
"""
import os
import types
import json
import re
import sys
import copy
import pprint
import tempfile
from optparse import OptionParser
import validictory
from pkg_resources import resource_stream
from nupic.frameworks.opf import jsonschema
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.support import aggregationDivide
from nupic.support.configuration import Configuration
from nupic.support.enum import Enum
from nupic.swarming.experiment_utils import (InferenceType, InferenceElement)
#############################################################################
# Global constants
# Space characters representing one level of indent in our generated python
# data structures
_INDENT_STEP = 2
_ONE_INDENT = ' ' * _INDENT_STEP
# Characters that are rejected in field names by this module
# (NOTE(review): usage is elsewhere in this file - confirm before changing)
_ILLEGAL_FIELDNAME_CHARACTERS = "\\"
# Metric window length, read from the 'nupic.opf.metricWindow' nupic
# configuration property at import time
METRIC_WINDOW = int(Configuration.get("nupic.opf.metricWindow"))
# Enum to characterize potential generation environments
OpfEnvironment = Enum(Nupic='nupic',
                      Experiment='opfExperiment')
class _ExpGeneratorException(Exception):
  """Base class for all ExpGenerator-specific exceptions."""
class _CreateDirectoryException(_ExpGeneratorException):
  """ Raised on error creating the experiment directory

  Attributes:
  reason: the reason the exception was raised (usually an exception)
  """
  def __init__(self, dirPath, reason):
    """
    dirPath: the path that we attempted to create for experiment files
    reason: any object that can be converted to a string that explains
            the reason (may be an exception)
    """
    message = "Error creating directory <%s>: %s." % (str(dirPath),
                                                      str(reason))
    super(_CreateDirectoryException, self).__init__(message)
    self.reason = reason
class _InvalidFunctionArgException(_ExpGeneratorException):
  """ Raised in response to invalid or incompatible function arguments. """
class _InvalidCommandArgException(_ExpGeneratorException):
  """ Raised in response to invalid or incompatible command arguments/values.
  When the program is executed from the command line, the handler is expected
  to report the error (_outputErrorReport()) and exit the program with error
  status=1.
  """
class _ErrorReportingException(_ExpGeneratorException):
  """ Raised by our error-result reporting code itself.  Once this exception
  is handled, there is no point in calling the error result reporting code
  again; the typical response is to re-raise this exception.
  """
  def __init__(self, problem, precursor):
    """
    problem: string-convertible object describing the failure experienced by
             the error-reporting function
    precursor: string-convertible object explaining the original error that
             the error-reporting function was attempting to report when it
             encountered its own failure
    """
    message = ("Encountered error: '%s' while reporting "
               "error: '%s'.") % (problem, precursor)
    super(_ErrorReportingException, self).__init__(message)
class FieldTypeError(_ExpGeneratorException):
  """ Raised when a field's type is invalid or unsupported. """
  pass
def _makeUsageErrorStr(errorString, usageString):
  """ Combine an error string and a usage string into the single consistent
  format used for all usage errors: "ERROR: <error> (<usage>)".
  """
  return "ERROR: {0} ({1})".format(errorString, usageString)
def _handleShowSchemaOption():
  """ Pretty-print the JSON schema accepted by the --description option to
  stdout, then return.
  """
  print "\n============== BEGIN INPUT SCHEMA for --description =========>>"
  print(json.dumps(_getExperimentDescriptionSchema(), indent=_INDENT_STEP*2))
  print "\n<<============== END OF INPUT SCHEMA for --description ========"
  return
def _handleDescriptionOption(cmdArgStr, outDir, usageStr, hsVersion,
claDescriptionTemplateFile):
"""
Parses and validates the --description option args and executes the
request
Parameters:
-----------------------------------------------------------------------
cmdArgStr: JSON string compatible with _gExperimentDescriptionSchema
outDir: where to place generated experiment files
usageStr: program usage string
hsVersion: which version of hypersearch permutations file to generate, can
be 'v1' or 'v2'
claDescriptionTemplateFile: Filename containing the template description
retval: nothing
"""
# convert --description arg from JSON string to dict
try:
args = json.loads(cmdArgStr)
except Exception, e:
raise _InvalidCommandArgException(
_makeUsageErrorStr(
("JSON arg parsing failed for --description: %s\n" + \
"ARG=<%s>") % (str(e), cmdArgStr), usageStr))
#print "PARSED JSON ARGS=\n%s" % (json.dumps(args, indent=4))
filesDescription = _generateExperiment(args, outDir, hsVersion=hsVersion,
claDescriptionTemplateFile = claDescriptionTemplateFile)
pprint.pprint(filesDescription)
return
def _handleDescriptionFromFileOption(filename, outDir, usageStr, hsVersion,
claDescriptionTemplateFile):
"""
Parses and validates the --descriptionFromFile option and executes the
request
Parameters:
-----------------------------------------------------------------------
filename: File from which we'll extract description JSON
outDir: where to place generated experiment files
usageStr: program usage string
hsVersion: which version of hypersearch permutations file to generate, can
be 'v1' or 'v2'
claDescriptionTemplateFile: Filename containing the template description
retval: nothing
"""
try:
fileHandle = open(filename, 'r')
JSONStringFromFile = fileHandle.read().splitlines()
JSONStringFromFile = ''.join(JSONStringFromFile)
except Exception, e:
raise _InvalidCommandArgException(
_makeUsageErrorStr(
("File open failed for --descriptionFromFile: %s\n" + \
"ARG=<%s>") % (str(e), filename), usageStr))
_handleDescriptionOption(JSONStringFromFile, outDir, usageStr,
hsVersion=hsVersion,
claDescriptionTemplateFile = claDescriptionTemplateFile)
return
def _isInt(x, precision = 0.0001):
  """
  Return (isInt, intValue) for a given floating point number.

  Parameters:
  ----------------------------------------------------------------------
  x: floating point number to evaluate
  precision: desired precision
  retval: (isInt, intValue)
  isInt: True if x is close enough to an integer value
  intValue: x as an integer
  """
  xInt = int(round(x))
  # FIX: scale the tolerance by abs(x) so the test also works for negative
  # inputs (precision * x was negative, making every negative integer report
  # False); '<=' rather than '<' makes x == 0.0 correctly report as integral.
  return (abs(x - xInt) <= precision * abs(x), xInt)
def _isString(obj):
  """
  Returns whether or not the object is a string (str or unicode).

  NOTE: this deliberately compares type() against types.StringTypes rather
  than using isinstance(), so instances of str/unicode *subclasses* are not
  treated as strings here - preserve that behavior.
  """
  return type(obj) in types.StringTypes
def _quoteAndEscape(string):
  """
  string: input string (ascii or unicode)

  Returns: a quoted string with characters that are represented in python via
  escape sequences converted to those escape sequences
  """
  assert _isString(string)
  # pprint.pformat both quotes the string and renders special characters as
  # python escape sequences
  quoted = pprint.pformat(string)
  return quoted
def _indentLines(str, indentLevels = 1, indentFirstLine=True):
  """ Indent all lines in the given string

  str: input string
  indentLevels: number of levels of indentation to apply
  indentFirstLine: if False, the 1st line will not be indented

  Returns: The result string with all lines indented
  """
  prefix = _ONE_INDENT * indentLevels
  lines = str.splitlines(True)
  # Optionally leave the first line as-is and prefix the rest
  if lines and not indentFirstLine:
    head, body = lines[0], lines[1:]
  else:
    head, body = '', lines
  return head + ''.join(prefix + line for line in body)
def _isCategory(fieldType):
  """Predicate for determining whether a field is a categorical variable or a
  scalar variable.  Mainly used for determining the appropriate metrics.

  Returns True for 'string' fields, False for 'int'/'float' fields, and
  (implicitly) None for any other field type.
  """
  if fieldType == 'string':
    return True
  if fieldType in ('int', 'float'):
    return False
def _generateMetricSpecString(inferenceElement, metric,
                              params=None, field=None,
                              returnLabel=False):
  """ Generates the string representation of a MetricSpec object, and returns
  the metric key associated with the metric.

  Parameters:
  -----------------------------------------------------------------------
  inferenceElement:
    An InferenceElement value that indicates which part of the inference this
    metric is computed on
  metric:
    The type of the metric being computed (e.g. aae, avg_error)
  params:
    A dictionary of parameters for the metric. The keys are the parameter names
    and the values should be the parameter values (e.g. window=200)
  field:
    The name of the field for which this metric is being computed
  returnLabel:
    If True, returns the label of the MetricSpec that was generated
  """
  metricSpecArgs = dict(metric=metric,
                        field=field,
                        params=params,
                        inferenceElement=inferenceElement)
  # Render as python source text, e.g. "MetricSpec(metric=..., field=...)".
  # NOTE: the argument order in the output depends on dict iteration order.
  metricSpecAsString = "MetricSpec(%s)" % \
      ', '.join(['%s=%r' % (item[0],item[1])
                 for item in metricSpecArgs.iteritems()])
  if not returnLabel:
    return metricSpecAsString
  # Instantiate a real MetricSpec only to derive its canonical label
  spec = MetricSpec(**metricSpecArgs)
  metricLabel = spec.getLabel()
  return metricSpecAsString, metricLabel
def _generateFileFromTemplates(templateFileNames, outputFilePath,
replacementDict):
""" Generates a file by applying token replacements to the given template
file
templateFileName:
A list of template file names; these files are assumed to be in
the same directory as the running experiment_generator.py script.
ExpGenerator will perform the substitution and concanetate
the files in the order they are specified
outputFilePath: Absolute path of the output file
replacementDict:
A dictionary of token/replacement pairs
"""
# Find out where we're running from so we know where to find templates
installPath = os.path.dirname(__file__)
outputFile = open(outputFilePath, "w")
outputLines = []
inputLines = []
firstFile = True
for templateFileName in templateFileNames:
# Separate lines from each file by two blank lines.
if not firstFile:
inputLines.extend([os.linesep]*2)
firstFile = False
inputFilePath = os.path.join(installPath, templateFileName)
inputFile = open(inputFilePath)
inputLines.extend(inputFile.readlines())
inputFile.close()
print "Writing ", len(inputLines), "lines..."
for line in inputLines:
tempLine = line
# Enumerate through each key in replacementDict and replace with value
for k, v in replacementDict.iteritems():
if v is None:
v = "None"
tempLine = re.sub(k, v, tempLine)
outputFile.write(tempLine)
outputFile.close()
def _generateEncoderChoicesV1(fieldInfo):
""" Return a list of possible encoder parameter combinations for the given
field and the default aggregation function to use. Each parameter combination
is a dict defining the parameters for the encoder. Here is an example
return value for the encoderChoicesList:
[
None,
{'fieldname':'timestamp',
'name': 'timestamp_timeOfDay',
'type':'DateEncoder'
'dayOfWeek': (7,1)
},
{'fieldname':'timestamp',
'name': 'timestamp_timeOfDay',
'type':'DateEncoder'
'dayOfWeek': (7,3)
},
],
Parameters:
--------------------------------------------------
fieldInfo: item from the 'includedFields' section of the
description JSON object
retval: (encoderChoicesList, aggFunction)
encoderChoicesList: a list of encoder choice lists for this field.
Most fields will generate just 1 encoder choice list.
DateTime fields can generate 2 or more encoder choice lists,
one for dayOfWeek, one for timeOfDay, etc.
aggFunction: name of aggregation function to use for this
field type
"""
width = 7
fieldName = fieldInfo['fieldName']
fieldType = fieldInfo['fieldType']
encoderChoicesList = []
# Scalar?
if fieldType in ['float', 'int']:
aggFunction = 'mean'
encoders = [None]
for n in (13, 50, 150, 500):
encoder = dict(type='ScalarSpaceEncoder', name=fieldName, fieldname=fieldName,
n=n, w=width, clipInput=True,space="absolute")
if 'minValue' in fieldInfo:
encoder['minval'] = fieldInfo['minValue']
if 'maxValue' in fieldInfo:
encoder['maxval'] = fieldInfo['maxValue']
encoders.append(encoder)
encoderChoicesList.append(encoders)
# String?
elif fieldType == 'string':
aggFunction = 'first'
encoders = [None]
encoder = dict(type='SDRCategoryEncoder', name=fieldName,
fieldname=fieldName, n=100, w=width)
encoders.append(encoder)
encoderChoicesList.append(encoders)
# Datetime?
elif fieldType == 'datetime':
aggFunction = 'first'
# First, the time of day representation
encoders = [None]
for radius in (1, 8):
encoder = dict(type='DateEncoder', name='%s_timeOfDay' % (fieldName),
fieldname=fieldName, timeOfDay=(width, radius))
encoders.append(encoder)
encoderChoicesList.append(encoders)
# Now, the day of week representation
encoders = [None]
for radius in (1, 3):
encoder = dict(type='DateEncoder', name='%s_dayOfWeek' % (fieldName),
fieldname=fieldName, dayOfWeek=(width, radius))
encoders.append(encoder)
encoderChoicesList.append(encoders)
else:
raise RuntimeError("Unsupported field type '%s'" % (fieldType))
# Return results
return (encoderChoicesList, aggFunction)
def _generateEncoderStringsV1(includedFields):
  """ Generate and return the following encoder related substitution variables:

  encoderSpecsStr:
    For the base description file, this string defines the default
    encoding dicts for each encoder. For example:
        '__gym_encoder' : {   'fieldname': 'gym',
        'n': 13,
        'name': 'gym',
        'type': 'SDRCategoryEncoder',
        'w': 7},
        '__address_encoder' : {   'fieldname': 'address',
        'n': 13,
        'name': 'address',
        'type': 'SDRCategoryEncoder',
        'w': 7}

  permEncoderChoicesStr:
    For the permutations file, this defines the possible
    encoder dicts for each encoder. For example:
        '__timestamp_dayOfWeek_encoder': [
                     None,
                     {'fieldname':'timestamp',
                      'name': 'timestamp_timeOfDay',
                      'type':'DateEncoder'
                      'dayOfWeek': (7,1)
                      },
                     {'fieldname':'timestamp',
                      'name': 'timestamp_timeOfDay',
                      'type':'DateEncoder'
                      'dayOfWeek': (7,3)
                      },
                   ],

        '__field_consumption_encoder': [
                    None,
                    {'fieldname':'consumption',
                     'name': 'consumption',
                     'type':'AdaptiveScalarEncoder',
                     'n': 13,
                     'w': 7,
                     }
                  ]

  Parameters:
  --------------------------------------------------
  includedFields:  item from the 'includedFields' section of the
                    description JSON object. This is a list of dicts, each
                    dict defining the field name, type, and optional min
                    and max values.

  retval:  (encoderSpecsStr, permEncoderChoicesStr)
           NOTE: earlier revisions of this docstring also described an
           encoderSchemaStr; it is NOT produced or returned by this function.
  """
  # ------------------------------------------------------------------------
  # First accumulate the possible choices for each encoder
  encoderChoicesList = []
  for fieldInfo in includedFields:

    fieldName = fieldInfo['fieldName']

    # Get the list of encoder choices for this field. The aggregation
    # function returned alongside the choices is not used by this function.
    (choicesList, aggFunction) = _generateEncoderChoicesV1(fieldInfo)
    encoderChoicesList.extend(choicesList)

  # ------------------------------------------------------------------------
  # Generate the string containing the encoder specs and encoder schema. See
  # the function comments for an example of the encoderSpecsStr and
  # encoderSchemaStr
  #
  encoderSpecsList = []
  for encoderChoices in encoderChoicesList:
    # Use the last choice as the default in the base file because the 1st is
    # often None
    encoder = encoderChoices[-1]

    # Check for bad characters
    for c in _ILLEGAL_FIELDNAME_CHARACTERS:
      if encoder['name'].find(c) >= 0:
        raise _ExpGeneratorException("Illegal character in field: %r (%r)" % (
          c, encoder['name']))

    encoderSpecsList.append("%s: \n%s%s" % (
        _quoteAndEscape(encoder['name']),
        2*_ONE_INDENT,
        pprint.pformat(encoder, indent=2*_INDENT_STEP)))

  encoderSpecsStr = ',\n  '.join(encoderSpecsList)

  # ------------------------------------------------------------------------
  # Generate the string containing the permutation encoder choices. See the
  # function comments above for an example of the permEncoderChoicesStr

  permEncoderChoicesList = []
  for encoderChoices in encoderChoicesList:
    permEncoderChoicesList.append("%s: %s," % (
        _quoteAndEscape(encoderChoices[-1]['name']),
        pprint.pformat(encoderChoices, indent=2*_INDENT_STEP)))
  permEncoderChoicesStr = '\n'.join(permEncoderChoicesList)
  permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, 1,
                                       indentFirstLine=False)

  # Return results
  return (encoderSpecsStr, permEncoderChoicesStr)
def _generatePermEncoderStr(options, encoderDict):
  """ Generate the string that defines the permutations to apply for a given
  encoder.

  Parameters:
  -----------------------------------------------------------------------
  options: experiment params
  encoderDict: the encoder dict, which gets placed into the description.py

  For example, if the encoderDict contains:
    'consumption':  {
        'clipInput': True,
        'fieldname': u'consumption',
        'n': 100,
        'name': u'consumption',
        'type': 'AdaptiveScalarEncoder',
        'w': 21},

  The return string will contain:
    "PermuteEncoder(fieldName='consumption',
                    encoderClass='AdaptiveScalarEncoder',
                    w=21,
                    n=PermuteInt(28, 521),
                    clipInput=True)"

  NOTE: side effect - when a truthy 'runDelta' key is present, it is popped
  from encoderDict after being translated into a permutable 'space' choice.
  """
  permStr = ""

  # If it's the encoder for the classifier input, then it's always present so
  # put it in as a dict in the permutations.py file instead of a
  # PermuteEncoder().
  if encoderDict.get('classifierOnly', False):
    permStr = "dict("
    for key, value in encoderDict.items():
      if key == "name":
        continue

      # n is permuted relative to the fixed w, except for category encoders
      # whose n is not swarmed over
      if key == 'n' and encoderDict['type'] != 'SDRCategoryEncoder':
        permStr += "n=PermuteInt(%d, %d), " % (encoderDict["w"] + 7,
                                               encoderDict["w"] + 500)
      else:
        # Quote string values; everything else is emitted via its repr-ish %s
        if issubclass(type(value), basestring):
          permStr += "%s='%s', " % (key, value)
        else:
          permStr += "%s=%s, " % (key, value)
    permStr += ")"

  else:
    # Scalar encoders
    if encoderDict["type"] in ["ScalarSpaceEncoder", "AdaptiveScalarEncoder",
                               "ScalarEncoder", "LogEncoder"]:
      permStr = "PermuteEncoder("
      for key, value in encoderDict.items():
        # Map encoder-dict keys onto PermuteEncoder constructor arg names
        if key == "fieldname":
          key = "fieldName"
        elif key == "type":
          key = "encoderClass"
        elif key == "name":
          continue

        if key == "n":
          # Always permute over n, bounded relative to the fixed w
          permStr += "n=PermuteInt(%d, %d), " % (encoderDict["w"] + 1,
                                                 encoderDict["w"] + 500)
        elif key == "runDelta":
          # runDelta=True: let swarming choose delta vs. absolute space,
          # unless an explicit 'space' was already provided
          if value and not "space" in encoderDict:
            permStr += "space=PermuteChoices([%s,%s]), " \
                     % (_quoteAndEscape("delta"), _quoteAndEscape("absolute"))
          encoderDict.pop("runDelta")
        else:
          if issubclass(type(value), basestring):
            permStr += "%s='%s', " % (key, value)
          else:
            permStr += "%s=%s, " % (key, value)
      permStr += ")"

    # Category encoder
    elif encoderDict["type"] in ["SDRCategoryEncoder"]:
      permStr = "PermuteEncoder("
      for key, value in encoderDict.items():
        if key == "fieldname":
          key = "fieldName"
        elif key == "type":
          key = "encoderClass"
        elif key == "name":
          continue

        if issubclass(type(value), basestring):
          permStr += "%s='%s', " % (key, value)
        else:
          permStr += "%s=%s, " % (key, value)
      permStr += ")"

    # Datetime encoder
    elif encoderDict["type"] in ["DateEncoder"]:
      permStr = "PermuteEncoder("
      for key, value in encoderDict.items():
        if key == "fieldname":
          key = "fieldName"
        elif key == "type":
          # encoderClass is derived from the sub-field key (timeOfDay, etc.)
          continue
        elif key == "name":
          continue

        if key == "timeOfDay":
          # value is a (w, radius) tuple; radius becomes permutable
          permStr += "encoderClass='%s.timeOfDay', " % (encoderDict["type"])
          permStr += "radius=PermuteFloat(0.5, 12), "
          permStr += "w=%d, " % (value[0])
        elif key == "dayOfWeek":
          permStr += "encoderClass='%s.dayOfWeek', " % (encoderDict["type"])
          permStr += "radius=PermuteFloat(1, 6), "
          permStr += "w=%d, " % (value[0])
        elif key == "weekend":
          # 'weekend' holds a bare int width, not a (w, radius) tuple
          permStr += "encoderClass='%s.weekend', " % (encoderDict["type"])
          permStr += "radius=PermuteChoices([1]), "
          permStr += "w=%d, " % (value)
        else:
          if issubclass(type(value), basestring):
            permStr += "%s='%s', " % (key, value)
          else:
            permStr += "%s=%s, " % (key, value)
      permStr += ")"

    else:
      raise RuntimeError("Unsupported encoder type '%s'" % \
                         (encoderDict["type"]))

  return permStr
def _generateEncoderStringsV2(includedFields, options):
  """ Generate and return the following encoder related substitution variables:

  encoderSpecsStr:
    For the base description file, this string defines the default
    encoding dicts for each encoder. For example:
        __gym_encoder = {   'fieldname': 'gym',
        'n': 13,
        'name': 'gym',
        'type': 'SDRCategoryEncoder',
        'w': 7},
        __address_encoder = {  'fieldname': 'address',
        'n': 13,
        'name': 'address',
        'type': 'SDRCategoryEncoder',
        'w': 7}

  permEncoderChoicesStr:
    For the permutations file, this defines the possible
    encoder dicts for each encoder. For example:
        '__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7,
          n=100),

        '__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder',
          w=7, n=100),

        '__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp',
          'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])),

        '__consumption_encoder': PermuteEncoder('consumption',
          'AdaptiveScalarEncoder', w=7, n=PermuteInt(13, 500, 20), minval=0,
          maxval=PermuteInt(100, 300, 25)),

  Parameters:
  --------------------------------------------------
  includedFields:  item from the 'includedFields' section of the
                    description JSON object. This is a list of dicts, each
                    dict defining the field name, type, and optional min
                    and max values.

  retval:  (encoderSpecsStr permEncoderChoicesStr)
  """
  # Fixed encoder width used for every field
  width = 21
  encoderDictsList = []

  # If this is a NontemporalClassification experiment, then the
  # the "predicted" field (the classification value) should be marked to ONLY
  # go to the classifier
  if options['inferenceType'] in ["NontemporalClassification",
                                  "NontemporalMultiStep",
                                  "TemporalMultiStep",
                                  "MultiStep"]:
    classifierOnlyField = options['inferenceArgs']['predictedField']
  else:
    classifierOnlyField = None

  # ==========================================================================
  # For each field, generate the default encoding dict and PermuteEncoder
  # constructor arguments
  for fieldInfo in includedFields:

    fieldName = fieldInfo['fieldName']
    fieldType = fieldInfo['fieldType']

    # ---------
    # Scalar?
    if fieldType in ['float', 'int']:
      # n=100 is reasonably hardcoded value for n when used by description.py
      # The swarming will use PermuteEncoder below, where n is variable and
      # depends on w
      runDelta = fieldInfo.get("runDelta", False)
      if runDelta or "space" in fieldInfo:
        encoderDict = dict(type='ScalarSpaceEncoder', name=fieldName,
                           fieldname=fieldName, n=100, w=width, clipInput=True)
        if runDelta:
          encoderDict["runDelta"] = True
      else:
        encoderDict = dict(type='AdaptiveScalarEncoder', name=fieldName,
                           fieldname=fieldName, n=100, w=width, clipInput=True)

      if 'minValue' in fieldInfo:
        encoderDict['minval'] = fieldInfo['minValue']
      if 'maxValue' in fieldInfo:
        encoderDict['maxval'] = fieldInfo['maxValue']

      # If both min and max were specified, use a non-adaptive encoder
      if ('minValue' in fieldInfo and 'maxValue' in fieldInfo) \
            and (encoderDict['type'] == 'AdaptiveScalarEncoder'):
        encoderDict['type'] = 'ScalarEncoder'

      # Defaults may have been over-ridden by specifying an encoder type
      if 'encoderType' in fieldInfo:
        encoderDict['type'] = fieldInfo['encoderType']

      if 'space' in fieldInfo:
        encoderDict['space'] = fieldInfo['space']
      encoderDictsList.append(encoderDict)

    # ---------
    # String?
    elif fieldType == 'string':
      encoderDict = dict(type='SDRCategoryEncoder', name=fieldName,
                         fieldname=fieldName, n=100+width, w=width)
      if 'encoderType' in fieldInfo:
        encoderDict['type'] = fieldInfo['encoderType']
      encoderDictsList.append(encoderDict)

    # ---------
    # Datetime?
    elif fieldType == 'datetime':
      # First, the time of day representation
      encoderDict = dict(type='DateEncoder', name='%s_timeOfDay' % (fieldName),
                         fieldname=fieldName, timeOfDay=(width, 1))
      if 'encoderType' in fieldInfo:
        encoderDict['type'] = fieldInfo['encoderType']
      encoderDictsList.append(encoderDict)

      # Now, the day of week representation
      encoderDict = dict(type='DateEncoder', name='%s_dayOfWeek' % (fieldName),
                         fieldname=fieldName, dayOfWeek=(width, 1))
      if 'encoderType' in fieldInfo:
        encoderDict['type'] = fieldInfo['encoderType']
      encoderDictsList.append(encoderDict)

      # Now, the weekend representation. NOTE: (width) is a bare int here,
      # not a 1-tuple; _generatePermEncoderStr formats it with %d accordingly.
      encoderDict = dict(type='DateEncoder', name='%s_weekend' % (fieldName),
                         fieldname=fieldName, weekend=(width))
      if 'encoderType' in fieldInfo:
        encoderDict['type'] = fieldInfo['encoderType']
      encoderDictsList.append(encoderDict)

    else:
      raise RuntimeError("Unsupported field type '%s'" % (fieldType))

    # -----------------------------------------------------------------------
    # If this was the predicted field, insert another encoder that sends it
    # to the classifier only.
    # NOTE(review): encoderDict here is whichever dict was appended LAST for
    # this field; for a datetime predicted field that is the 'weekend'
    # representation - confirm this is intended.
    if fieldName == classifierOnlyField:
      clEncoderDict = dict(encoderDict)
      clEncoderDict['classifierOnly'] = True
      clEncoderDict['name'] = '_classifierInput'
      encoderDictsList.append(clEncoderDict)

      # If the predicted field needs to be excluded, take it out of the encoder
      # lists
      if options["inferenceArgs"]["inputPredictedField"] == "no":
        encoderDictsList.remove(encoderDict)

  # Remove any encoders not in fixedFields
  if options.get('fixedFields') is not None:
    tempList=[]
    for encoderDict in encoderDictsList:
      if encoderDict['name'] in options['fixedFields']:
        tempList.append(encoderDict)
    encoderDictsList = tempList

  # ==========================================================================
  # Now generate the encoderSpecsStr and permEncoderChoicesStr strings from
  # encoderDictsList and constructorStringList

  encoderSpecsList = []
  permEncoderChoicesList = []
  for encoderDict in encoderDictsList:
    # Backslashes in field names would break the generated python source
    if encoderDict['name'].find('\\') >= 0:
      raise _ExpGeneratorException("Illegal character in field: '\\'")

    # Check for bad characters
    for c in _ILLEGAL_FIELDNAME_CHARACTERS:
      if encoderDict['name'].find(c) >= 0:
        raise _ExpGeneratorException("Illegal character %s in field %r" %(c, encoderDict['name']))

    constructorStr = _generatePermEncoderStr(options, encoderDict)

    encoderKey = _quoteAndEscape(encoderDict['name'])
    encoderSpecsList.append("%s: %s%s" % (
        encoderKey,
        2*_ONE_INDENT,
        pprint.pformat(encoderDict, indent=2*_INDENT_STEP)))

    # Each permEncoderChoicesStr is of the form:
    #  PermuteEncoder('gym', 'SDRCategoryEncoder',
    #     w=7, n=100),
    permEncoderChoicesList.append("%s: %s," % (encoderKey, constructorStr))

  # Join into strings
  encoderSpecsStr = ',\n  '.join(encoderSpecsList)
  permEncoderChoicesStr = '\n'.join(permEncoderChoicesList)
  permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, 1,
                                       indentFirstLine=True)

  # Return results
  return (encoderSpecsStr, permEncoderChoicesStr)
def _handleJAVAParameters(options):
""" Handle legacy options (TEMPORARY) """
# Find the correct InferenceType for the Model
if 'inferenceType' not in options:
prediction = options.get('prediction', {InferenceType.TemporalNextStep:
{'optimize':True}})
inferenceType = None
for infType, value in prediction.iteritems():
if value['optimize']:
inferenceType = infType
break
if inferenceType == 'temporal':
inferenceType = InferenceType.TemporalNextStep
if inferenceType != InferenceType.TemporalNextStep:
raise _ExpGeneratorException("Unsupported inference type %s" % \
(inferenceType))
options['inferenceType'] = inferenceType
# Find the best value for the predicted field
if 'predictionField' in options:
if 'inferenceArgs' not in options:
options['inferenceArgs'] = {'predictedField': options['predictionField']}
elif 'predictedField' not in options['inferenceArgs']:
options['inferenceArgs']['predictedField'] = options['predictionField']
def _getPropertyValue(schema, propertyName, options):
"""Checks to see if property is specified in 'options'. If not, reads the
default value from the schema"""
if propertyName not in options:
paramsSchema = schema['properties'][propertyName]
if 'default' in paramsSchema:
options[propertyName] = paramsSchema['default']
else:
options[propertyName] = None
def _getExperimentDescriptionSchema():
"""
Returns the experiment description schema. This implementation loads it in
from file experimentDescriptionSchema.json.
Parameters:
--------------------------------------------------------------------------
Returns: returns a dict representing the experiment description schema.
"""
installPath = os.path.dirname(os.path.abspath(__file__))
schemaFilePath = os.path.join(installPath, "experimentDescriptionSchema.json")
return json.loads(open(schemaFilePath, 'r').read())
def _generateExperiment(options, outputDirPath, hsVersion,
claDescriptionTemplateFile):
""" Executes the --description option, which includes:
1. Perform provider compatibility checks
2. Preprocess the training and testing datasets (filter, join providers)
3. If test dataset omitted, split the training dataset into training
and testing datasets.
4. Gather statistics about the training and testing datasets.
5. Generate experiment scripts (description.py, permutaions.py)
Parameters:
--------------------------------------------------------------------------
options: dictionary that matches the schema defined by the return value of
_getExperimentDescriptionSchema(); NOTE: this arg may be modified
by this function.
outputDirPath: where to place generated files
hsVersion: which version of hypersearch permutations file to generate, can
be 'v1' or 'v2'
claDescriptionTemplateFile: Filename containing the template description
Returns: on success, returns a dictionary per _experimentResultsJSONSchema;
raises exception on error
Assumption1: input train and test files have identical field metadata
"""
_gExperimentDescriptionSchema = _getExperimentDescriptionSchema()
# Validate JSON arg using JSON schema validator
try:
validictory.validate(options, _gExperimentDescriptionSchema)
except Exception, e:
raise _InvalidCommandArgException(
("JSON arg validation failed for option --description: " + \
"%s\nOPTION ARG=%s") % (str(e), pprint.pformat(options)))
# Validate the streamDef
streamSchema = json.load(resource_stream(jsonschema.__name__,
'stream_def.json'))
try:
validictory.validate(options['streamDef'], streamSchema)
except Exception, e:
raise _InvalidCommandArgException(
("JSON arg validation failed for streamDef " + \
"%s\nOPTION ARG=%s") % (str(e), json.dumps(options)))
# -----------------------------------------------------------------------
# Handle legacy parameters from JAVA API server
# TODO: remove this!
_handleJAVAParameters(options)
# -----------------------------------------------------------------------
# Get default values
for propertyName in _gExperimentDescriptionSchema['properties']:
_getPropertyValue(_gExperimentDescriptionSchema, propertyName, options)
if options['inferenceArgs'] is not None:
infArgs = _gExperimentDescriptionSchema['properties']['inferenceArgs']
for schema in infArgs['type']:
if isinstance(schema, dict):
for propertyName in schema['properties']:
_getPropertyValue(schema, propertyName, options['inferenceArgs'])
if options['anomalyParams'] is not None:
anomalyArgs = _gExperimentDescriptionSchema['properties']['anomalyParams']
for schema in anomalyArgs['type']:
if isinstance(schema, dict):
for propertyName in schema['properties']:
_getPropertyValue(schema, propertyName, options['anomalyParams'])
# If the user specified nonTemporalClassification, make sure prediction
# steps is 0
predictionSteps = options['inferenceArgs'].get('predictionSteps', None)
if options['inferenceType'] == InferenceType.NontemporalClassification:
if predictionSteps is not None and predictionSteps != [0]:
raise RuntimeError("When NontemporalClassification is used, prediction"
" steps must be [0]")
# -------------------------------------------------------------------------
# If the user asked for 0 steps of prediction, then make this a spatial
# classification experiment
if predictionSteps == [0] \
and options['inferenceType'] in ['NontemporalMultiStep',
'TemporalMultiStep',
'MultiStep']:
options['inferenceType'] = InferenceType.NontemporalClassification
# If NontemporalClassification was chosen as the inferenceType, then the
# predicted field can NOT be used as an input
if options["inferenceType"] == InferenceType.NontemporalClassification:
if options["inferenceArgs"]["inputPredictedField"] == "yes" \
or options["inferenceArgs"]["inputPredictedField"] == "auto":
raise RuntimeError("When the inference type is NontemporalClassification"
" inputPredictedField must be set to 'no'")
options["inferenceArgs"]["inputPredictedField"] = "no"
# -----------------------------------------------------------------------
# Process the swarmSize setting, if provided
swarmSize = options['swarmSize']
if swarmSize is None:
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "auto"
elif swarmSize == 'small':
if options['minParticlesPerSwarm'] is None:
options['minParticlesPerSwarm'] = 3
if options['iterationCount'] is None:
options['iterationCount'] = 100
if options['maxModels'] is None:
options['maxModels'] = 1
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "yes"
elif swarmSize == 'medium':
if options['minParticlesPerSwarm'] is None:
options['minParticlesPerSwarm'] = 5
if options['iterationCount'] is None:
options['iterationCount'] = 4000
if options['maxModels'] is None:
options['maxModels'] = 200
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "auto"
elif swarmSize == 'large':
if options['minParticlesPerSwarm'] is None:
options['minParticlesPerSwarm'] = 15
#options['killUselessSwarms'] = False
#options['minFieldContribution'] = -1000
#options['maxFieldBranching'] = 10
#options['tryAll3FieldCombinations'] = True
options['tryAll3FieldCombinationsWTimestamps'] = True
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "auto"
else:
raise RuntimeError("Unsupported swarm size: %s" % (swarmSize))
# -----------------------------------------------------------------------
# Get token replacements
tokenReplacements = dict()
#--------------------------------------------------------------------------
# Generate the encoder related substitution strings
includedFields = options['includedFields']
if hsVersion == 'v1':
(encoderSpecsStr, permEncoderChoicesStr) = \
_generateEncoderStringsV1(includedFields)
elif hsVersion in ['v2', 'ensemble']:
(encoderSpecsStr, permEncoderChoicesStr) = \
_generateEncoderStringsV2(includedFields, options)
else:
raise RuntimeError("Unsupported hsVersion of %s" % (hsVersion))
#--------------------------------------------------------------------------
# Generate the string containing the sensor auto-reset dict.
if options['resetPeriod'] is not None:
sensorAutoResetStr = pprint.pformat(options['resetPeriod'],
indent=2*_INDENT_STEP)
else:
sensorAutoResetStr = 'None'
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in options['streamDef']:
for key in aggregationPeriod.keys():
if key in options['streamDef']['aggregation']:
aggregationPeriod[key] = options['streamDef']['aggregation'][key]
if 'fields' in options['streamDef']['aggregation']:
for (fieldName, func) in options['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
# Form the aggregation strings
aggregationInfoStr = "%s" % (pprint.pformat(aggregationInfo,
indent=2*_INDENT_STEP))
# -----------------------------------------------------------------------
# Generate the string defining the dataset. This is basically the
# streamDef, but referencing the aggregation we already pulled out into the
# config dict (which enables permuting over it)
datasetSpec = options['streamDef']
if 'aggregation' in datasetSpec:
datasetSpec.pop('aggregation')
if hasAggregation:
datasetSpec['aggregation'] = '$SUBSTITUTE'
datasetSpecStr = pprint.pformat(datasetSpec, indent=2*_INDENT_STEP)
datasetSpecStr = datasetSpecStr.replace(
"'$SUBSTITUTE'", "config['aggregationInfo']")
datasetSpecStr = _indentLines(datasetSpecStr, 2, indentFirstLine=False)
# -----------------------------------------------------------------------
# Was computeInterval specified with Multistep prediction? If so, this swarm
# should permute over different aggregations
computeInterval = options['computeInterval']
if computeInterval is not None \
and options['inferenceType'] in ['NontemporalMultiStep',
'TemporalMultiStep',
'MultiStep']:
# Compute the predictAheadTime based on the minAggregation (specified in
# the stream definition) and the number of prediction steps
predictionSteps = options['inferenceArgs'].get('predictionSteps', [1])
if len(predictionSteps) > 1:
raise _InvalidCommandArgException("Invalid predictionSteps: %s. " \
"When computeInterval is specified, there can only be one " \
"stepSize in predictionSteps." % predictionSteps)
if max(aggregationInfo.values()) == 0:
raise _InvalidCommandArgException("Missing or nil stream aggregation: "
"When computeInterval is specified, then the stream aggregation "
"interval must be non-zero.")
# Compute the predictAheadTime
numSteps = predictionSteps[0]
predictAheadTime = dict(aggregationPeriod)
for key in predictAheadTime.iterkeys():
predictAheadTime[key] *= numSteps
predictAheadTimeStr = pprint.pformat(predictAheadTime,
indent=2*_INDENT_STEP)
# This tells us to plug in a wildcard string for the prediction steps that
# we use in other parts of the description file (metrics, inferenceArgs,
# etc.)
options['dynamicPredictionSteps'] = True
else:
options['dynamicPredictionSteps'] = False
predictAheadTimeStr = "None"
# -----------------------------------------------------------------------
# Save environment-common token substitutions
tokenReplacements['\$EXP_GENERATOR_PROGRAM_PATH'] = \
_quoteAndEscape(os.path.abspath(__file__))
# If the "uber" metric 'MultiStep' was specified, then plug in TemporalMultiStep
# by default
inferenceType = options['inferenceType']
if inferenceType == 'MultiStep':
inferenceType = InferenceType.TemporalMultiStep
tokenReplacements['\$INFERENCE_TYPE'] = "'%s'" % inferenceType
# Nontemporal classificaion uses only encoder and classifier
if inferenceType == InferenceType.NontemporalClassification:
tokenReplacements['\$SP_ENABLE'] = "False"
tokenReplacements['\$TP_ENABLE'] = "False"
else:
tokenReplacements['\$SP_ENABLE'] = "True"
tokenReplacements['\$TP_ENABLE'] = "True"
tokenReplacements['\$CLA_CLASSIFIER_IMPL'] = ""
tokenReplacements['\$ANOMALY_PARAMS'] = pprint.pformat(
options['anomalyParams'], indent=2*_INDENT_STEP)
tokenReplacements['\$ENCODER_SPECS'] = encoderSpecsStr
tokenReplacements['\$SENSOR_AUTO_RESET'] = sensorAutoResetStr
tokenReplacements['\$AGGREGATION_INFO'] = aggregationInfoStr
tokenReplacements['\$DATASET_SPEC'] = datasetSpecStr
if options['iterationCount'] is None:
options['iterationCount'] = -1
tokenReplacements['\$ITERATION_COUNT'] \
= str(options['iterationCount'])
tokenReplacements['\$SP_POOL_PCT'] \
= str(options['spCoincInputPoolPct'])
tokenReplacements['\$HS_MIN_PARTICLES'] \
= str(options['minParticlesPerSwarm'])
tokenReplacements['\$SP_PERM_CONNECTED'] \
= str(options['spSynPermConnected'])
tokenReplacements['\$FIELD_PERMUTATION_LIMIT'] \
= str(options['fieldPermutationLimit'])
tokenReplacements['\$PERM_ENCODER_CHOICES'] \
= permEncoderChoicesStr
predictionSteps = options['inferenceArgs'].get('predictionSteps', [1])
predictionStepsStr = ','.join([str(x) for x in predictionSteps])
tokenReplacements['\$PREDICTION_STEPS'] = "'%s'" % (predictionStepsStr)
tokenReplacements['\$PREDICT_AHEAD_TIME'] = predictAheadTimeStr
# Option permuting over SP synapse decrement value
tokenReplacements['\$PERM_SP_CHOICES'] = ""
if options['spPermuteDecrement'] \
and options['inferenceType'] != 'NontemporalClassification':
tokenReplacements['\$PERM_SP_CHOICES'] = \
_ONE_INDENT +"'synPermInactiveDec': PermuteFloat(0.0003, 0.1),\n"
# The TM permutation parameters are not required for non-temporal networks
if options['inferenceType'] in ['NontemporalMultiStep',
'NontemporalClassification']:
tokenReplacements['\$PERM_TP_CHOICES'] = ""
else:
tokenReplacements['\$PERM_TP_CHOICES'] = \
" 'activationThreshold': PermuteInt(12, 16),\n" \
+ " 'minThreshold': PermuteInt(9, 12),\n" \
+ " 'pamLength': PermuteInt(1, 5),\n"
# If the inference type is just the generic 'MultiStep', then permute over
# temporal/nonTemporal multistep
if options['inferenceType'] == 'MultiStep':
tokenReplacements['\$PERM_INFERENCE_TYPE_CHOICES'] = \
" 'inferenceType': PermuteChoices(['NontemporalMultiStep', " \
+ "'TemporalMultiStep']),"
else:
tokenReplacements['\$PERM_INFERENCE_TYPE_CHOICES'] = ""
# The Classifier permutation parameters are only required for
# Multi-step inference types
if options['inferenceType'] in ['NontemporalMultiStep', 'TemporalMultiStep',
'MultiStep', 'TemporalAnomaly',
'NontemporalClassification']:
tokenReplacements['\$PERM_CL_CHOICES'] = \
" 'alpha': PermuteFloat(0.0001, 0.1),\n"
else:
tokenReplacements['\$PERM_CL_CHOICES'] = ""
# The Permutations alwaysIncludePredictedField setting.
# * When the experiment description has 'inputPredictedField' set to 'no', we
# simply do not put in an encoder for the predicted field.
# * When 'inputPredictedField' is set to 'auto', we include an encoder for the
# predicted field and swarming tries it out just like all the other fields.
# * When 'inputPredictedField' is set to 'yes', we include this setting in
# the permutations file which informs swarming to always use the
# predicted field (the first swarm will be the predicted field only)
tokenReplacements['\$PERM_ALWAYS_INCLUDE_PREDICTED_FIELD'] = \
"inputPredictedField = '%s'" % \
(options["inferenceArgs"]["inputPredictedField"])
# The Permutations minFieldContribution setting
if options.get('minFieldContribution', None) is not None:
tokenReplacements['\$PERM_MIN_FIELD_CONTRIBUTION'] = \
"minFieldContribution = %d" % (options['minFieldContribution'])
else:
tokenReplacements['\$PERM_MIN_FIELD_CONTRIBUTION'] = ""
# The Permutations killUselessSwarms setting
if options.get('killUselessSwarms', None) is not None:
tokenReplacements['\$PERM_KILL_USELESS_SWARMS'] = \
"killUselessSwarms = %r" % (options['killUselessSwarms'])
else:
tokenReplacements['\$PERM_KILL_USELESS_SWARMS'] = ""
# The Permutations maxFieldBranching setting
if options.get('maxFieldBranching', None) is not None:
tokenReplacements['\$PERM_MAX_FIELD_BRANCHING'] = \
"maxFieldBranching = %r" % (options['maxFieldBranching'])
else:
tokenReplacements['\$PERM_MAX_FIELD_BRANCHING'] = ""
# The Permutations tryAll3FieldCombinations setting
if options.get('tryAll3FieldCombinations', None) is not None:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS'] = \
"tryAll3FieldCombinations = %r" % (options['tryAll3FieldCombinations'])
else:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS'] = ""
# The Permutations tryAll3FieldCombinationsWTimestamps setting
if options.get('tryAll3FieldCombinationsWTimestamps', None) is not None:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS_W_TIMESTAMPS'] = \
"tryAll3FieldCombinationsWTimestamps = %r" % \
(options['tryAll3FieldCombinationsWTimestamps'])
else:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS_W_TIMESTAMPS'] = ""
# The Permutations fieldFields setting
if options.get('fixedFields', None) is not None:
tokenReplacements['\$PERM_FIXED_FIELDS'] = \
"fixedFields = %r" % (options['fixedFields'])
else:
tokenReplacements['\$PERM_FIXED_FIELDS'] = ""
# The Permutations fastSwarmModelParams setting
if options.get('fastSwarmModelParams', None) is not None:
tokenReplacements['\$PERM_FAST_SWARM_MODEL_PARAMS'] = \
"fastSwarmModelParams = %r" % (options['fastSwarmModelParams'])
else:
tokenReplacements['\$PERM_FAST_SWARM_MODEL_PARAMS'] = ""
# The Permutations maxModels setting
if options.get('maxModels', None) is not None:
tokenReplacements['\$PERM_MAX_MODELS'] = \
"maxModels = %r" % (options['maxModels'])
else:
tokenReplacements['\$PERM_MAX_MODELS'] = ""
# --------------------------------------------------------------------------
# The Aggregation choices have to be determined when we are permuting over
# aggregations.
if options['dynamicPredictionSteps']:
debugAgg = True
# First, we need to error check to insure that computeInterval is an integer
# multiple of minAggregation (aggregationPeriod)
quotient = aggregationDivide(computeInterval, aggregationPeriod)
(isInt, multiple) = _isInt(quotient)
if not isInt or multiple < 1:
raise _InvalidCommandArgException("Invalid computeInterval: %s. "
"computeInterval must be an integer multiple of the stream "
"aggregation (%s)." % (computeInterval, aggregationPeriod))
# The valid aggregation choices are governed by the following constraint,
# 1.) (minAggregation * N) * M = predictAheadTime
# (minAggregation * N) * M = maxPredictionSteps * minAggregation
# N * M = maxPredictionSteps
#
# 2.) computeInterval = K * aggregation
# computeInterval = K * (minAggregation * N)
#
# where: aggregation = minAggregation * N
# K, M and N are integers >= 1
# N = aggregation / minAggregation
# M = predictionSteps, for a particular aggregation
# K = number of predictions within each compute interval
#
# Let's build up a a list of the possible N's that satisfy the
# N * M = maxPredictionSteps constraint
mTimesN = float(predictionSteps[0])
possibleNs = []
for n in xrange(1, int(mTimesN)+1):
m = mTimesN / n
mInt = int(round(m))
if mInt < 1:
break
if abs(m - mInt) > 0.0001 * m:
continue
possibleNs.append(n)
if debugAgg:
print "All integer factors of %d are: %s" % (mTimesN, possibleNs)
# Now go through and throw out any N's that don't satisfy the constraint:
# computeInterval = K * (minAggregation * N)
aggChoices = []
for n in possibleNs:
# Compute minAggregation * N
agg = dict(aggregationPeriod)
for key in agg.iterkeys():
agg[key] *= n
# Make sure computeInterval is an integer multiple of the aggregation
# period
quotient = aggregationDivide(computeInterval, agg)
#print computeInterval, agg
#print quotient
#import sys; sys.exit()
(isInt, multiple) = _isInt(quotient)
if not isInt or multiple < 1:
continue
aggChoices.append(agg)
# Only eveluate up to 5 different aggregations
aggChoices = aggChoices[-5:]
if debugAgg:
print "Aggregation choices that will be evaluted during swarming:"
for agg in aggChoices:
print " ==>", agg
print
tokenReplacements['\$PERM_AGGREGATION_CHOICES'] = (
"PermuteChoices(%s)" % (
pprint.pformat(aggChoices, indent=2*_INDENT_STEP)))
else:
tokenReplacements['\$PERM_AGGREGATION_CHOICES'] = aggregationInfoStr
# Generate the inferenceArgs replacement tokens
_generateInferenceArgs(options, tokenReplacements)
# Generate the metric replacement tokens
_generateMetricsSubstitutions(options, tokenReplacements)
# -----------------------------------------------------------------------
# Generate Control dictionary
environment = options['environment']
if environment == OpfEnvironment.Nupic:
tokenReplacements['\$ENVIRONMENT'] = "'%s'"%OpfEnvironment.Nupic
controlTemplate = "nupicEnvironmentTemplate.tpl"
elif environment == OpfEnvironment.Experiment:
tokenReplacements['\$ENVIRONMENT'] = "'%s'"%OpfEnvironment.Experiment
controlTemplate = "opfExperimentTemplate.tpl"
else:
raise _InvalidCommandArgException("Invalid environment type %s"% environment)
# -----------------------------------------------------------------------
if outputDirPath is None:
outputDirPath = tempfile.mkdtemp()
if not os.path.exists(outputDirPath):
os.makedirs(outputDirPath)
print "Generating experiment files in directory: %s..." % (outputDirPath)
descriptionPyPath = os.path.join(outputDirPath, "description.py")
_generateFileFromTemplates([claDescriptionTemplateFile, controlTemplate],
descriptionPyPath,
tokenReplacements)
permutationsPyPath = os.path.join(outputDirPath, "permutations.py")
if hsVersion == 'v1':
_generateFileFromTemplates(['permutationsTemplateV1.tpl'],permutationsPyPath,
tokenReplacements)
elif hsVersion == 'ensemble':
_generateFileFromTemplates(['permutationsTemplateEnsemble.tpl'],permutationsPyPath,
tokenReplacements)
elif hsVersion == 'v2':
_generateFileFromTemplates(['permutationsTemplateV2.tpl'],permutationsPyPath,
tokenReplacements)
else:
raise(ValueError("This permutation version is not supported yet: %s" %
hsVersion))
print "done."
def _generateMetricsSubstitutions(options, tokenReplacements):
  """Fill in the metric-related token substitutions.

  Populates the following keys in tokenReplacements:
    \$METRICS
    \$LOGGED_METRICS
    \$PERM_OPTIMIZE_SETTING
  """
  # Log every metric by default
  options['loggedMetrics'] = [".*"]

  # Build the metric spec strings along with the label of the metric that
  # the swarm should optimize over.
  metricList, optimizeMetricLabel = _generateMetricSpecs(options)

  # Render the metric specs as an indented, comma-separated body
  metricsBody = _indentLines(",\n".join(metricList), 2, indentFirstLine=False)

  # Render the list of logged-metric patterns as a python list literal
  loggedPatterns = ["'%s'" % pattern for pattern in options['loggedMetrics']]

  tokenReplacements['\$METRICS'] = metricsBody
  tokenReplacements['\$LOGGED_METRICS'] = "[%s]" % (", ".join(loggedPatterns))
  tokenReplacements['\$PERM_OPTIMIZE_SETTING'] = (
      'minimize = "%s"' % optimizeMetricLabel)
def _generateMetricSpecs(options):
  """ Generates the metric spec strings for a given InferenceType.

  Parameters:
  -------------------------------------------------------------------------
  options: ExpGenerator options dict; reads 'inferenceType', 'inferenceArgs',
            'metricWindow', 'dynamicPredictionSteps', 'customErrorMetric',
            'runBaselines' and 'metrics'
  retval: (metricsList, optimizeMetricLabel)
            metricsList: list of metric string names
            optimizeMetricLabel: Name of the metric which to optimize over
  """
  inferenceType = options['inferenceType']
  inferenceArgs = options['inferenceArgs']
  predictionSteps = inferenceArgs['predictionSteps']

  # Fall back to the globally-configured metric window when none was given
  metricWindow = options['metricWindow']
  if metricWindow is None:
    metricWindow = int(Configuration.get("nupic.opf.metricWindow"))

  metricSpecStrings = []
  optimizeMetricLabel = ""

  # -----------------------------------------------------------------------
  # Generate the metrics specified by the expGenerator parameters
  metricSpecStrings.extend(_generateExtraMetricSpecs(options))

  # -----------------------------------------------------------------------
  optimizeMetricSpec = None
  # If using a dynamically computed prediction steps (i.e. when swarming
  # over aggregation is requested), then we will plug in the variable
  # predictionSteps in place of the statically provided predictionSteps
  # from the JSON description.
  if options['dynamicPredictionSteps']:
    assert len(predictionSteps) == 1
    predictionSteps = ['$REPLACE_ME']

  # -----------------------------------------------------------------------
  # Metrics for temporal prediction
  if inferenceType in (InferenceType.TemporalNextStep,
                       InferenceType.TemporalAnomaly,
                       InferenceType.TemporalMultiStep,
                       InferenceType.NontemporalMultiStep,
                       InferenceType.NontemporalClassification,
                       'MultiStep'):

    predictedFieldName, predictedFieldType = _getPredictedField(options)
    isCategory = _isCategory(predictedFieldType)
    # Category fields are scored via average error; scalar fields via
    # absolute-error based metrics
    metricNames = ('avg_err',) if isCategory else ('aae', 'altMAPE')
    trivialErrorMetric = 'avg_err' if isCategory else 'altMAPE'
    oneGramErrorMetric = 'avg_err' if isCategory else 'altMAPE'
    movingAverageBaselineName = 'moving_mode' if isCategory else 'moving_mean'

    # Multi-step metrics
    for metricName in metricNames:
      metricSpec, metricLabel = \
        _generateMetricSpecString(field=predictedFieldName,
                 inferenceElement=InferenceElement.multiStepBestPredictions,
                 metric='multiStep',
                 params={'errorMetric': metricName,
                         'window': metricWindow,
                         'steps': predictionSteps},
                 returnLabel=True)
      metricSpecStrings.append(metricSpec)

    # If the custom error metric was specified, add that
    if options["customErrorMetric"] is not None:
      metricParams = dict(options["customErrorMetric"])
      metricParams['errorMetric'] = 'custom_error_metric'
      metricParams['steps'] = predictionSteps
      # If errorWindow is not specified, make it equal to the default window
      if "errorWindow" not in metricParams:
        metricParams["errorWindow"] = metricWindow
      metricSpec, metricLabel = _generateMetricSpecString(
          field=predictedFieldName,
          inferenceElement=InferenceElement.multiStepPredictions,
          metric="multiStep",
          params=metricParams,
          returnLabel=True)
      metricSpecStrings.append(metricSpec)

    # Optimize over the most recently generated metric. Be sure to
    # escape special characters since the label is used as a regular
    # expression downstream.
    optimizeMetricSpec = metricSpec
    metricLabel = metricLabel.replace('[', '\\[')
    metricLabel = metricLabel.replace(']', '\\]')
    optimizeMetricLabel = metricLabel

    if options["customErrorMetric"] is not None:
      optimizeMetricLabel = ".*custom_error_metric.*"

    # Add in the trivial baseline metrics; not applicable to nontemporal
    # classification
    if options["runBaselines"] \
          and inferenceType != InferenceType.NontemporalClassification:
      for steps in predictionSteps:
        metricSpecStrings.append(
          _generateMetricSpecString(field=predictedFieldName,
                                    inferenceElement=InferenceElement.prediction,
                                    metric="trivial",
                                    params={'window': metricWindow,
                                            "errorMetric": trivialErrorMetric,
                                            'steps': steps})
          )

        # Include the baseline moving mean/mode metric
        if isCategory:
          metricSpecStrings.append(
            _generateMetricSpecString(field=predictedFieldName,
                                      inferenceElement=InferenceElement.prediction,
                                      metric=movingAverageBaselineName,
                                      params={'window': metricWindow,
                                              "errorMetric": "avg_err",
                                              "mode_window": 200,
                                              "steps": steps})
            )
        else:
          metricSpecStrings.append(
            _generateMetricSpecString(field=predictedFieldName,
                                      inferenceElement=InferenceElement.prediction,
                                      metric=movingAverageBaselineName,
                                      params={'window': metricWindow,
                                              "errorMetric": "altMAPE",
                                              "mean_window": 200,
                                              "steps": steps})
            )

  # -----------------------------------------------------------------------
  # Metrics for classification
  # BUGFIX: the original membership test read
  #   inferenceType in (InferenceType.TemporalClassification)
  # which, lacking the trailing comma, is NOT a tuple -- 'in' then performed
  # a substring test against the enum's string value. The trailing comma
  # makes it a proper one-element tuple membership test.
  elif inferenceType in (InferenceType.TemporalClassification,):
    metricName = 'avg_err'
    trivialErrorMetric = 'avg_err'
    oneGramErrorMetric = 'avg_err'
    movingAverageBaselineName = 'moving_mode'

    optimizeMetricSpec, optimizeMetricLabel = \
      _generateMetricSpecString(inferenceElement=InferenceElement.classification,
                                metric=metricName,
                                params={'window': metricWindow},
                                returnLabel=True)
    metricSpecStrings.append(optimizeMetricSpec)

    if options["runBaselines"]:
      # If temporal, generate the trivial predictor metric
      if inferenceType == InferenceType.TemporalClassification:
        metricSpecStrings.append(
          _generateMetricSpecString(inferenceElement=InferenceElement.classification,
                                    metric="trivial",
                                    params={'window': metricWindow,
                                            "errorMetric": trivialErrorMetric})
          )
        metricSpecStrings.append(
          _generateMetricSpecString(inferenceElement=InferenceElement.classification,
                                    metric="two_gram",
                                    params={'window': metricWindow,
                                            "errorMetric": oneGramErrorMetric})
          )
        metricSpecStrings.append(
          _generateMetricSpecString(inferenceElement=InferenceElement.classification,
                                    metric=movingAverageBaselineName,
                                    params={'window': metricWindow,
                                            "errorMetric": "avg_err",
                                            "mode_window": 200})
          )

    # Custom error metric
    if options["customErrorMetric"] is not None:
      # If errorWindow is not specified, make it equal to the default window
      if "errorWindow" not in options["customErrorMetric"]:
        options["customErrorMetric"]["errorWindow"] = metricWindow
      optimizeMetricSpec = _generateMetricSpecString(
          inferenceElement=InferenceElement.classification,
          metric="custom",
          params=options["customErrorMetric"])
      optimizeMetricLabel = ".*custom_error_metric.*"
      metricSpecStrings.append(optimizeMetricSpec)

  # -----------------------------------------------------------------------
  # Plug in the predictionSteps variable for any dynamically generated
  # prediction steps
  if options['dynamicPredictionSteps']:
    for i in range(len(metricSpecStrings)):
      metricSpecStrings[i] = metricSpecStrings[i].replace(
          "'$REPLACE_ME'", "predictionSteps")
    optimizeMetricLabel = optimizeMetricLabel.replace(
        "'$REPLACE_ME'", ".*")

  return metricSpecStrings, optimizeMetricLabel
def _generateExtraMetricSpecs(options):
  """Build metric spec strings for the non-default metrics listed in the
  expGenerator params.

  Metrics flagged as 'logged' also have their labels appended to
  options['loggedMetrics'].
  """
  _metricSpecSchema = {'properties': {}}

  specStrings = []
  for metricInfo in options['metrics']:
    # Normalize/validate the metric's properties against the schema
    for schemaProp in _metricSpecSchema['properties'].keys():
      _getPropertyValue(_metricSpecSchema, schemaProp, metricInfo)

    specString, label = _generateMetricSpecString(
        field=metricInfo['field'],
        metric=metricInfo['metric'],
        params=metricInfo['params'],
        inferenceElement=metricInfo['inferenceElement'],
        returnLabel=True)

    if metricInfo['logged']:
      options['loggedMetrics'].append(label)

    specStrings.append(specString)

  return specStrings
def _getPredictedField(options):
""" Gets the predicted field and it's datatype from the options dictionary
Returns: (predictedFieldName, predictedFieldType)
"""
if not options['inferenceArgs'] or \
not options['inferenceArgs']['predictedField']:
return None, None
predictedField = options['inferenceArgs']['predictedField']
predictedFieldInfo = None
includedFields = options['includedFields']
for info in includedFields:
if info['fieldName'] == predictedField:
predictedFieldInfo = info
break
if predictedFieldInfo is None:
raise ValueError(
"Predicted field '%s' does not exist in included fields." % predictedField
)
predictedFieldType = predictedFieldInfo['fieldType']
return predictedField, predictedFieldType
def _generateInferenceArgs(options, tokenReplacements):
  """ Fill in the token substitutions related to the predicted field and the
  supplemental prediction arguments (\$INFERENCE_ARGS, \$PREDICTION_FIELD).
  """
  inferenceType = options['inferenceType']
  inferenceArgs = options.get('inferenceArgs', None)
  predictedField = _getPredictedField(options)[0]

  # These inference types cannot run without a predicted field
  if inferenceType in (InferenceType.TemporalNextStep,
                       InferenceType.TemporalAnomaly):
    assert predictedField, "Inference Type '%s' needs a predictedField "\
                           "specified in the inferenceArgs dictionary"\
                           % inferenceType

  argsRepr = {}
  if inferenceArgs:
    if options['dynamicPredictionSteps']:
      # Substitute the dynamically computed predictionSteps variable for the
      # static scalar value before pretty-printing
      patchedArgs = copy.deepcopy(inferenceArgs)
      patchedArgs['predictionSteps'] = '$REPLACE_ME'
      argsRepr = pprint.pformat(patchedArgs).replace("'$REPLACE_ME'",
                                                     '[predictionSteps]')
    else:
      argsRepr = pprint.pformat(inferenceArgs)

  tokenReplacements['\$INFERENCE_ARGS'] = argsRepr
  tokenReplacements['\$PREDICTION_FIELD'] = predictedField
def expGenerator(args):
  """ Parse, validate, and dispatch the ExpGenerator command-line options.

  args: command-line argument list (excluding the program name)

  On success performs the requested operation; on invalid arguments raises
  _InvalidCommandArgException with usage info.
  """
  # -----------------------------------------------------------------
  # Define the command-line interface
  #
  parser = OptionParser()
  parser.set_usage(
      "%prog [options] --description='{json object with args}'\n" +
      "%prog [options] --descriptionFromFile='(unknown)'\n" +
      "%prog [options] --showSchema")

  parser.add_option(
      "--description", dest="description",
      help="Tells ExpGenerator to generate an experiment description.py and "
           "permutations.py file using the given JSON formatted experiment "
           "description string.")

  parser.add_option(
      "--descriptionFromFile", dest='descriptionFromFile',
      help="Tells ExpGenerator to open the given filename and use it's "
           "contents as the JSON formatted experiment description.")

  parser.add_option(
      "--claDescriptionTemplateFile", dest='claDescriptionTemplateFile',
      default='claDescriptionTemplate.tpl',
      help="The file containing the template description file for "
           " ExpGenerator [default: %default]")

  parser.add_option(
      "--showSchema", action="store_true", dest="showSchema",
      help="Prints the JSON schemas for the --description arg.")

  parser.add_option(
      "--version", dest='version', default='v2',
      help="Generate the permutations file for this version of hypersearch."
           " Possible choices are 'v1' and 'v2' [default: %default].")

  parser.add_option(
      "--outDir", dest="outDir", default=None,
      help="Where to generate experiment. If not specified, "
           "then a temp directory will be created")

  (options, remainingArgs) = parser.parse_args(args)

  # -----------------------------------------------------------------
  # Positional arguments are never expected
  #
  if remainingArgs:
    raise _InvalidCommandArgException(
      _makeUsageErrorStr("Unexpected command-line args: <%s>" %
                         (' '.join(remainingArgs),), parser.get_usage()))

  # -----------------------------------------------------------------
  # Reject mutually-exclusive option combinations
  #
  activeOptions = [name for name in ('description', 'showSchema')
                   if getattr(options, name) != None]
  if len(activeOptions) > 1:
    raise _InvalidCommandArgException(
      _makeUsageErrorStr(("The specified command options are " +
                          "mutually-exclusive: %s") % (activeOptions,),
                         parser.get_usage()))

  # -----------------------------------------------------------------
  # Dispatch to the requested operation
  #
  if options.showSchema:
    _handleShowSchemaOption()
  elif options.description:
    _handleDescriptionOption(
        options.description, options.outDir, parser.get_usage(),
        hsVersion=options.version,
        claDescriptionTemplateFile=options.claDescriptionTemplateFile)
  elif options.descriptionFromFile:
    _handleDescriptionFromFileOption(
        options.descriptionFromFile, options.outDir, parser.get_usage(),
        hsVersion=options.version,
        claDescriptionTemplateFile=options.claDescriptionTemplateFile)
  else:
    raise _InvalidCommandArgException(
      _makeUsageErrorStr("Error in validating command options. No option "
                         "provided:\n", parser.get_usage()))
if __name__ == '__main__':
  # Command-line entry point: forward all args except the program name.
  expGenerator(sys.argv[1:])
| 74,538 | Python | .py | 1,590 | 38.487421 | 99 | 0.629612 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,034 | __init__.py | numenta_nupic-legacy/src/nupic/swarming/exp_generator/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,035 | client_jobs_dao.py | numenta_nupic-legacy/src/nupic/database/client_jobs_dao.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Add Context Manager (with ...) support for Jython/Python 2.5.x (
# ClientJobManager used to use Jython); it's a noop in newer Python versions.
from __future__ import with_statement
import collections
import logging
from optparse import OptionParser
import sys
import traceback
import uuid
from nupic.support.decorators import logExceptions #, logEntryExit
from nupic.database.connection import ConnectionFactory
from nupic.support.configuration import Configuration
from nupic.support import pymysql_helpers
_MODULE_NAME = "nupic.database.ClientJobsDAO"
_LOGGER = logging.getLogger(__name__)
class InvalidConnectionException(Exception):
  """ Raised when a worker attempts to update a model record that is owned
  by a different worker; model ownership is established via the database
  connection id.
  """
  pass
# Create a decorator for retrying idempotent SQL operations upon transient MySQL
# failures.
# WARNING: do NOT indiscriminately decorate non-idempotent operations with this
# decorator, as it may cause undesirable side-effects, such as multiple row
# insertions, etc.
# NOTE: having this as a global permits us to switch parameters wholesale (e.g.,
# timeout)
g_retrySQL = pymysql_helpers.retrySQL(logger=_LOGGER)
def _abbreviate(text, threshold):
""" Abbreviate the given text to threshold chars and append an ellipsis if its
length exceeds threshold; used for logging;
NOTE: the resulting text could be longer than threshold due to the ellipsis
"""
if text is not None and len(text) > threshold:
text = text[:threshold] + "..."
return text
class ClientJobsDAO(object):
""" This Data Access Object (DAO) is used for creating, managing, and updating
the ClientJobs database. The ClientJobs database is a MySQL database shared by
the UI, Stream Manager (StreamMgr), and the engine. The clients (UI and
StreamMgr) make calls to this DAO to request new jobs (Hypersearch, stream
jobs, model evaluations, etc.) and the engine queries and updates it to manage
and keep track of the jobs and report progress and results back to the
clients.
This class is primarily a collection of static methods that work with the
client jobs database. But, rather than taking the approach of declaring each
method as static, we provide just one static class method that returns a
reference to the (one) ClientJobsDAO instance allocated for the current
process (and perhaps in the future, for the current thread). This approach
gives us the flexibility in the future of perhaps allocating one instance per
thread and makes the internal design a bit more compartmentalized (by enabling
the use of instance variables). Note: This is generally referred to as
the singleton pattern.
A typical call is made in the following manner:
ClientJobsDAO.get().jobInfo()
If the caller desires, they have the option of caching the instance returned
from ClientJobsDAO.get(), i.e.:
cjDAO = ClientJobsDAO.get()
cjDAO.jobInfo()
cjDAO.jobSetStatus(...)
There are two tables in this database, the jobs table and the models table, as
described below. The jobs table keeps track of all jobs. The models table is
filled in by hypersearch jobs with the results of each model that it
evaluates.
Jobs table. The field names are given as:
internal mysql field name (public API field name)
field description
---------------------------------------------------------------------------
job_id (jobId): Generated by the database when a new job is inserted by a
client. This is an auto-incrementing ID that is unique among all
jobs.
client (client): The name of the client (i.e. 'UI', 'StreamMgr', etc.).
client_info (clientInfo): Arbitrary data specified by client.
client_key (clientKey): Foreign key as defined by the client.
cmd_line (cmdLine): Command line to be used to launch each worker process for
the job.
params (params): JSON encoded dict of job specific parameters that are
useful to the worker processes for this job. This field is provided
by the client when it inserts the job and can be fetched out of the
database by worker processes (based on job_id) if needed.
job_hash (jobHash): hash of the job, provided by the client, used for
detecting identical jobs when they use the jobInsertUnique() call.
Clients that don't care about whether jobs are unique or not do not
have to generate or care about this field.
status (status): The engine will periodically update the status field as the
job runs.
This is an enum. Possible values are:
STATUS_NOTSTARTED client has just added this job to the table
STATUS_STARTING: a CJM is in the process of launching this job in the
engine
STATUS_RUNNING: the engine is currently running this job
STATUS_TESTMODE: the job is being run by the test framework
outside the context of hadoop, should be
ignored
STATUS_COMPLETED: the job has completed. The completion_reason
field describes the manner in which it
completed
completion_reason (completionReason): Why this job completed. Possible values
are:
CMPL_REASON_SUCCESS: job completed successfully
CMPL_REASON_KILLED: job was killed by ClientJobManager
CMPL_REASON_CANCELLED: job was cancelled by user
CMPL_REASON_ERROR: job encountered an error. The completion_msg
field contains a text description of the error
completion_msg (completionMsg): Text description of error that occurred if job
terminated with completion_reason of CMPL_REASON_ERROR or
CMPL_REASON_KILLED
worker_completion_msg (workerCompletionMsg): Why this job completed, according
to the worker(s).
cancel (cancel): Set by the clent if/when it wants to cancel a job.
Periodically polled by the CJM and used as a signal to kill the job.
TODO: the above claim doesn't match current reality: presently,
Hypersearch and Production workers poll the cancel field.
start_time (startTime): date and time of when this job started.
end_time (endTime): date and time of when this job completed.
results (results): A JSON encoded dict of the results of a hypersearch job.
The dict contains the following fields. Note that this dict
is NULL before any model has reportedits results:
bestModel: The modelID of the best performing model so far
bestValue: The value of the optimized metric for the best model
_eng_last_update_time (engLastUpdateTime): Time stamp of last update. Used
for detecting stalled jobs.
_eng_cjm_conn_id (engCjmConnId): The database client connection ID of the CJM
(Client Job Manager) starting up this job. Set and checked while the
job is in the 'starting' phase. Used for detecting and dealing with
stalled CJM's
_eng_worker_state (engWorkerState): JSON encoded data structure
for private use by the workers.
_eng_status (engStatus): String used to send status messages from the engine
to the UI. For informative purposes only.
_eng_model_milestones (engModelMilestones): JSON encoded object with
information about global model milestone results.
minimum_workers (minimumWorkers): min number of desired workers at a time.
If 0, no workers will be allocated in a crunch
maximum_workers (maximumWorkers): max number of desired workers at a time. If
0, then use as many as practical given load on the cluster.
priority (priority): job scheduling priority; 0 is the default priority (
ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher
priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative values
are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY)
_eng_allocate_new_workers (engAllocateNewWorkers): Should the scheduling
algorithm allocate new workers to this job? If a specialized worker
willingly gives up control, we set this field to FALSE to avoid
allocating new workers.
_eng_untended_dead_workers (engUntendedDeadWorkers): If a specialized worker
fails or is killed by the scheduler, we set this feild to TRUE to
indicate that the worker is dead.
num_failed_workers (numFailedWorkers): The number of failed specialized workers
for this job. if the number of failures is greater than
max.failed.attempts, we mark the job as failed
last_failed_worker_error_msg (lastFailedWorkerErrorMsg): Error message of the
most recent failed specialized worker
Models table: field description
---------------------------------------------------------------------------
model_id (modelId): Generated by the database when the engine inserts a new
model. This is an auto-incrementing ID that is globally unique
among all models of all jobs.
job_id (jobId) : The job_id of the job in the Jobs Table that this model
belongs to.
params (params): JSON encoded dict of all the parameters used to generate
this particular model. The dict contains the following properties:
paramValues = modelParamValuesDict,
paramLabels = modelParamValueLabelsDict,
experimentName = expName
status (status): Enumeration of the model's status. Possible values are:
STATUS_NOTSTARTED: This model's parameters have been chosen, but
no worker is evaluating it yet.
STATUS_RUNNING: This model is currently being evaluated by a
worker
STATUS_COMPLETED: This model has finished running. The
completion_reason field describes why it
completed.
completion_reason (completionReason) : Why this model completed. Possible
values are:
CMPL_REASON_EOF: model reached the end of the dataset
CMPL_REASON_STOPPED: model stopped because it reached maturity
and was not deemed the best model.
CMPL_REASON_KILLED: model was killed by the terminator logic
before maturing and before reaching EOF
because it was doing so poorly
CMPL_REASON_ERROR: model encountered an error. The completion_msg
field contains a text description of the
error
completion_msg (completionMsg): Text description of error that occurred if
model terminated with completion_reason of CMPL_REASON_ERROR or
CMPL_REASON_KILLED
results (results): JSON encoded structure containing the latest online
metrics produced by the model. The engine periodically updates this
as the model runs.
optimized_metric(optimizedMetric): The value of the metric over which
this model is being optimized. Stroring this separately in the database
allows us to search through to find the best metric faster
update_counter (updateCounter): Incremented by the UI whenever the engine
updates the results field. This makes it easier and faster for the
UI to determine which models have changed results.
num_records (numRecords): Number of records (from the original dataset,
before aggregation) that have been processed so far by this model.
Periodically updated by the engine as the model is evaluated.
start_time (startTime): Date and time of when this model started being
evaluated.
end_time (endTime): Date and time of when this model completed.
cpu_time (cpuTime): How much actual CPU time was spent evaluating this
model (in seconds). This excludes any time the process spent
sleeping, or otherwise not executing code.
model_checkpoint_id (modelCheckpointId): Checkpoint identifier for this model
(after it has been saved)
_eng_params_hash (engParamsHash): MD5 hash of the params. Used for detecting
duplicate models.
_eng_particle_hash (engParticleHash): MD5 hash of the model's particle (for
particle swarm optimization algorithm).
_eng_last_update_time (engLastUpdateTime): Time stamp of last update. Used
for detecting stalled workers.
_eng_task_tracker_id (engTaskTrackerId): ID of the Hadoop Task Tracker
managing the worker
_eng_worker_id (engWorkerId): ID of the Hadoop Map Task (worker) for this task
_eng_attempt_id (engAttemptId): Hadoop attempt ID of this task attempt
_eng_worker_conn_id (engWorkerConnId): database client connection ID of the
hypersearch worker that is running this model
_eng_milestones (engMilestones): JSON encoded list of metric values for the
model at each milestone point.
_eng_stop (engStop): One of the STOP_REASON_XXX enumerated value strings
(or None). This gets set to STOP_REASON_KILLED if the terminator
decides that the performance of this model is so poor that it
should be terminated immediately. This gets set to STOP_REASON_STOPPED
if Hypersearch decides that the search is over and this model
doesn't have to run anymore.
_eng_matured (engMatured): Set by the model maturity checker when it decides
that this model has "matured".
"""
# Job priority range values.
#
# Higher-priority jobs will be scheduled to run at the expense of the
# lower-priority jobs, and higher-priority job tasks will preempt those with
# lower priority if there is inadequate supply of scheduling slots. Excess
# lower priority job tasks will starve as long as slot demand exceeds supply.
MIN_JOB_PRIORITY = -100 # Minimum job scheduling priority
DEFAULT_JOB_PRIORITY = 0 # Default job scheduling priority
MAX_JOB_PRIORITY = 100 # Maximum job scheduling priority
# Equates for job and model status
STATUS_NOTSTARTED = "notStarted"
STATUS_STARTING = "starting"
STATUS_RUNNING = "running"
STATUS_TESTMODE = "testMode"
STATUS_COMPLETED = "completed"
# Equates for job and model completion_reason field
CMPL_REASON_SUCCESS = "success" # jobs only - job completed successfully
CMPL_REASON_CANCELLED = "cancel" # jobs only - canceled by user;
# TODO: presently, no one seems to set the
# CANCELLED reason
CMPL_REASON_KILLED = "killed" # jobs or models - model killed by
# terminator for poor results or job
# killed by ClientJobManager
CMPL_REASON_ERROR = "error" # jobs or models - Encountered an error
# while running
CMPL_REASON_EOF = "eof" # models only - model reached end of
# data set
CMPL_REASON_STOPPED = "stopped" # models only - model stopped running
# because it matured and was not deemed
# the best model.
CMPL_REASON_ORPHAN = "orphan" # models only - model was detected as an
# orphan because the worker running it
# failed to update the last_update_time.
# This model is considered dead and a new
# one may be created to take its place.
# Equates for the model _eng_stop field
STOP_REASON_KILLED = "killed" # killed by model terminator for poor
# results before it matured.
STOP_REASON_STOPPED = "stopped" # stopped because it had matured and was
# not deemed the best model
# Equates for the cleaned field
CLEAN_NOT_DONE = "notdone" # Cleaning for job is not done
CLEAN_DONE = "done" # Cleaning for job is done
# Equates for standard job classes
JOB_TYPE_HS = "hypersearch"
JOB_TYPE_PM = "production-model"
JOB_TYPE_SM = "stream-manager"
JOB_TYPE_TEST = "test"
HASH_MAX_LEN = 16
""" max size, in bytes, of the hash used for model and job identification """
CLIENT_MAX_LEN = 8
""" max size, in bytes of the 'client' field's value """
  class _TableInfoBase(object):
    """ Common table info fields; base class.

    Holds per-table metadata (name, column names, and the dicts that map
    between database column names and their public camelCase equivalents).
    All attributes are None until filled in during ClientJobsDAO.connect().
    """
    __slots__ = ("tableName", "dbFieldNames", "publicFieldNames",
                 "pubToDBNameDict", "dbToPubNameDict",)
    def __init__(self):
      self.tableName = None
      """ Database-qualified table name (databasename.tablename) """
      self.dbFieldNames = None
      """ Names of fields in schema """
      self.publicFieldNames = None
      """ Public names of fields generated programmatically: e.g.,
      word1_word2_word3 => word1Word2Word3 """
      self.pubToDBNameDict = None
      self.dbToPubNameDict = None
      """ These dicts convert public field names to DB names and vice versa """
  class _JobsTableInfo(_TableInfoBase):
    """ Table metadata specific to the jobs table. """
    __slots__ = ("jobInfoNamedTuple",)
    # The namedtuple classes that we use to return information from various
    # functions
    jobDemandNamedTuple = collections.namedtuple(
      '_jobDemandNamedTuple',
      ['jobId', 'minimumWorkers', 'maximumWorkers', 'priority',
       'engAllocateNewWorkers', 'engUntendedDeadWorkers', 'numFailedWorkers',
       'engJobType'])
    def __init__(self):
      super(ClientJobsDAO._JobsTableInfo, self).__init__()
      # Generated dynamically after introspecting jobs table columns. Attributes
      # of this namedtuple are the public names of the jobs table columns.
      self.jobInfoNamedTuple = None
  class _ModelsTableInfo(_TableInfoBase):
    """ Table metadata specific to the models table. """
    __slots__ = ("modelInfoNamedTuple",)
    # The namedtuple classes that we use to return information from various
    # functions
    getParamsNamedTuple = collections.namedtuple(
      '_modelsGetParamsNamedTuple', ['modelId', 'params', 'engParamsHash'])
    getResultAndStatusNamedTuple = collections.namedtuple(
      '_modelsGetResultAndStatusNamedTuple',
      ['modelId', 'results', 'status', 'updateCounter', 'numRecords',
       'completionReason', 'completionMsg', 'engParamsHash', 'engMatured'])
    getUpdateCountersNamedTuple = collections.namedtuple(
      '_modelsGetUpdateCountersNamedTuple', ['modelId', 'updateCounter'])
    def __init__(self):
      super(ClientJobsDAO._ModelsTableInfo, self).__init__()
      # Generated dynamically after introspecting models columns. Attributes
      # of this namedtuple are the public names of the models table columns.
      self.modelInfoNamedTuple = None
_SEQUENCE_TYPES = (list, set, tuple)
""" Sequence types that we accept in args """
# There is one instance of the ClientJobsDAO per process. This class static
# variable gets filled in the first time the process calls
# ClientJobsDAO.get()
_instance = None
# The root name and version of the database. The actual database name is
# something of the form "client_jobs_v2_suffix".
_DB_ROOT_NAME = 'client_jobs'
_DB_VERSION = 30
@classmethod
def dbNamePrefix(cls):
""" Get the beginning part of the database name for the current version
of the database. This, concatenated with
'_' + Configuration.get('nupic.cluster.database.nameSuffix') will
produce the actual database name used.
"""
return cls.__getDBNamePrefixForVersion(cls._DB_VERSION)
@classmethod
def __getDBNamePrefixForVersion(cls, dbVersion):
""" Get the beginning part of the database name for the given database
version. This, concatenated with
'_' + Configuration.get('nupic.cluster.database.nameSuffix') will
produce the actual database name used.
Parameters:
----------------------------------------------------------------
dbVersion: ClientJobs database version number
retval: the ClientJobs database name prefix for the given DB version
"""
return '%s_v%d' % (cls._DB_ROOT_NAME, dbVersion)
@classmethod
def _getDBName(cls):
""" Generates the ClientJobs database name for the current version of the
database; "semi-private" class method for use by friends of the class.
Parameters:
----------------------------------------------------------------
retval: the ClientJobs database name
"""
return cls.__getDBNameForVersion(cls._DB_VERSION)
@classmethod
def __getDBNameForVersion(cls, dbVersion):
""" Generates the ClientJobs database name for the given version of the
database
Parameters:
----------------------------------------------------------------
dbVersion: ClientJobs database version number
retval: the ClientJobs database name for the given DB version
"""
# DB Name prefix for the given version
prefix = cls.__getDBNamePrefixForVersion(dbVersion)
# DB Name suffix
suffix = Configuration.get('nupic.cluster.database.nameSuffix')
# Replace dash and dot with underscore (e.g. 'ec2-user' or ec2.user will break SQL)
suffix = suffix.replace("-", "_")
suffix = suffix.replace(".", "_")
# Create the name of the database for the given DB version
dbName = '%s_%s' % (prefix, suffix)
return dbName
@staticmethod
@logExceptions(_LOGGER)
def get():
""" Get the instance of the ClientJobsDAO created for this process (or
perhaps at some point in the future, for this thread).
Parameters:
----------------------------------------------------------------
retval: instance of ClientJobsDAO
"""
# Instantiate if needed
if ClientJobsDAO._instance is None:
cjDAO = ClientJobsDAO()
cjDAO.connect()
ClientJobsDAO._instance = cjDAO
# Return the instance to the caller
return ClientJobsDAO._instance
@logExceptions(_LOGGER)
def __init__(self):
""" Instantiate a ClientJobsDAO instance.
Parameters:
----------------------------------------------------------------
"""
self._logger = _LOGGER
# Usage error to instantiate more than 1 instance per process
assert (ClientJobsDAO._instance is None)
# Create the name of the current version database
self.dbName = self._getDBName()
# NOTE: we set the table names here; the rest of the table info is set when
# the tables are initialized during connect()
self._jobs = self._JobsTableInfo()
self._jobs.tableName = '%s.jobs' % (self.dbName)
self._models = self._ModelsTableInfo()
self._models.tableName = '%s.models' % (self.dbName)
# Our connection ID, filled in during connect()
self._connectionID = None
@property
def jobsTableName(self):
return self._jobs.tableName
@property
def modelsTableName(self):
return self._models.tableName
def _columnNameDBToPublic(self, dbName):
""" Convert a database internal column name to a public name. This
takes something of the form word1_word2_word3 and converts it to:
word1Word2Word3. If the db field name starts with '_', it is stripped out
so that the name is compatible with collections.namedtuple.
for example: _word1_word2_word3 => word1Word2Word3
Parameters:
--------------------------------------------------------------
dbName: database internal field name
retval: public name
"""
words = dbName.split('_')
if dbName.startswith('_'):
words = words[1:]
pubWords = [words[0]]
for word in words[1:]:
pubWords.append(word[0].upper() + word[1:])
return ''.join(pubWords)
@logExceptions(_LOGGER)
@g_retrySQL
def connect(self, deleteOldVersions=False, recreate=False):
""" Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
"""
# Initialize tables, if needed
with ConnectionFactory.get() as conn:
# Initialize tables
self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,
recreate=recreate)
# Save our connection id
conn.cursor.execute('SELECT CONNECTION_ID()')
self._connectionID = conn.cursor.fetchall()[0][0]
self._logger.info("clientJobsConnectionID=%r", self._connectionID)
return
  @logExceptions(_LOGGER)
  def _initTables(self, cursor, deleteOldVersions, recreate):
    """ Initialize the jobs and models tables, creating the database and the
    tables if needed, then introspect the resulting schema to build the
    public/DB column-name mappings and namedtuple classes.

    Parameters:
    ----------------------------------------------------------------
    cursor:              SQL cursor
    deleteOldVersions:   if true, delete any old versions of the DB left
                         on the server
    recreate:            if true, recreate the database from scratch even
                         if it already exists.
    """
    # Delete old versions if they exist
    if deleteOldVersions:
      self._logger.info(
        "Dropping old versions of client_jobs DB; called from: %r",
        traceback.format_stack())
      for i in range(self._DB_VERSION):
        cursor.execute('DROP DATABASE IF EXISTS %s' %
                       (self.__getDBNameForVersion(i),))
    # Create the database if necessary
    if recreate:
      self._logger.info(
        "Dropping client_jobs DB %r; called from: %r",
        self.dbName, traceback.format_stack())
      cursor.execute('DROP DATABASE IF EXISTS %s' % (self.dbName))
    cursor.execute('CREATE DATABASE IF NOT EXISTS %s' % (self.dbName))
    # Get the list of tables already present in the database
    cursor.execute('SHOW TABLES IN %s' % (self.dbName))
    output = cursor.fetchall()
    tableNames = [x[0] for x in output]
    # ------------------------------------------------------------------------
    # Create the jobs table if it doesn't exist
    # Fields that start with '_eng' are intended for private use by the engine
    # and should not be used by the UI
    if 'jobs' not in tableNames:
      self._logger.info("Creating table %r", self.jobsTableName)
      fields = [
        'job_id INT UNSIGNED NOT NULL AUTO_INCREMENT',
        # unique jobID
        'client CHAR(%d)' % (self.CLIENT_MAX_LEN),
        # name of client (UI, StrmMgr, etc.)
        'client_info LONGTEXT',
        # Arbitrary data defined by the client
        'client_key varchar(255)',
        # Foreign key as defined by the client.
        'cmd_line LONGTEXT',
        # command line to use to launch each worker process
        'params LONGTEXT',
        # JSON encoded params for the job, for use by the worker processes
        'job_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
        # unique hash of the job, provided by the client. Used for detecting
        # identical job requests from the same client when they use the
        # jobInsertUnique() method.
        'status VARCHAR(16) DEFAULT "notStarted"',
        # One of the STATUS_XXX enumerated value strings
        'completion_reason VARCHAR(16)',
        # One of the CMPL_REASON_XXX enumerated value strings.
        # NOTE: This is the job completion reason according to the hadoop
        #  job-tracker. A success here does not necessarily mean the
        #  workers were "happy" with the job. To see if the workers
        #  failed, check the worker_completion_reason
        'completion_msg LONGTEXT',
        # Why this job completed, according to job-tracker
        'worker_completion_reason VARCHAR(16) DEFAULT "%s"' % \
          self.CMPL_REASON_SUCCESS,
        # One of the CMPL_REASON_XXX enumerated value strings. This
        # may be changed to CMPL_REASON_ERROR if any workers encounter
        # an error while running the job.
        'worker_completion_msg LONGTEXT',
        # Why this job completed, according to workers. If
        # worker_completion_reason is set to CMPL_REASON_ERROR, this will
        # contain the error information.
        'cancel BOOLEAN DEFAULT FALSE',
        # set by UI, polled by engine
        'start_time DATETIME DEFAULT NULL',
        # When job started
        'end_time DATETIME DEFAULT NULL',
        # When job ended
        'results LONGTEXT',
        # JSON dict with general information about the results of the job,
        # including the ID and value of the best model
        # TODO: different semantics for results field of ProductionJob
        '_eng_job_type VARCHAR(32)',
        # String used to specify the type of job that this is. Current
        # choices are hypersearch, production worker, or stream worker
        'minimum_workers INT UNSIGNED DEFAULT 0',
        # min number of desired workers at a time. If 0, no workers will be
        # allocated in a crunch
        'maximum_workers INT UNSIGNED DEFAULT 0',
        # max number of desired workers at a time. If 0, then use as many
        # as practical given load on the cluster.
        'priority INT DEFAULT %d' % self.DEFAULT_JOB_PRIORITY,
        # job scheduling priority; 0 is the default priority (
        # ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher
        # priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative
        # values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY)
        '_eng_allocate_new_workers BOOLEAN DEFAULT TRUE',
        # Should the scheduling algorithm allocate new workers to this job?
        # If a specialized worker willingly gives up control, we set this
        # field to FALSE to avoid allocating new workers.
        '_eng_untended_dead_workers BOOLEAN DEFAULT FALSE',
        # If a specialized worker fails or is killed by the scheduler, we
        # set this field to TRUE to indicate that the worker is dead
        'num_failed_workers INT UNSIGNED DEFAULT 0',
        # The number of failed specialized workers for this job. If the
        # number of failures is >= max.failed.attempts, we mark the job
        # as failed
        'last_failed_worker_error_msg LONGTEXT',
        # Error message of the most recent specialized failed worker
        '_eng_cleaning_status VARCHAR(16) DEFAULT "%s"' % \
          self.CLEAN_NOT_DONE,
        # Has the job been garbage collected; this includes removing
        # unneeded model output caches and s3 checkpoints.
        'gen_base_description LONGTEXT',
        # The contents of the generated description.py file from hypersearch
        # requests. This is generated by the Hypersearch workers and stored
        # here for reference, debugging, and development purposes.
        'gen_permutations LONGTEXT',
        # The contents of the generated permutations.py file from
        # hypersearch requests. This is generated by the Hypersearch workers
        # and stored here for reference, debugging, and development
        # purposes.
        '_eng_last_update_time DATETIME DEFAULT NULL',
        # time stamp of last update, used for detecting stalled jobs
        '_eng_cjm_conn_id INT UNSIGNED',
        # ID of the CJM starting up this job
        '_eng_worker_state LONGTEXT',
        # JSON encoded state of the hypersearch in progress, for private
        # use by the Hypersearch workers
        '_eng_status LONGTEXT',
        # String used for status messages sent from the engine for
        # informative purposes only. Usually printed periodically by
        # clients watching a job progress.
        '_eng_model_milestones LONGTEXT',
        # JSON encoded object with information about global model milestone
        # results
        'PRIMARY KEY (job_id)',
        'UNIQUE INDEX (client, job_hash)',
        'INDEX (status)',
        'INDEX (client_key)'
        ]
      options = [
        'AUTO_INCREMENT=1000',
        ]
      query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
                (self.jobsTableName, ','.join(fields), ','.join(options))
      cursor.execute(query)
    # ------------------------------------------------------------------------
    # Create the models table if it doesn't exist
    # Fields that start with '_eng' are intended for private use by the engine
    # and should not be used by the UI
    if 'models' not in tableNames:
      self._logger.info("Creating table %r", self.modelsTableName)
      fields = [
        'model_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT',
        # globally unique model ID
        'job_id INT UNSIGNED NOT NULL',
        # jobID
        'params LONGTEXT NOT NULL',
        # JSON encoded params for the model
        'status VARCHAR(16) DEFAULT "notStarted"',
        # One of the STATUS_XXX enumerated value strings
        'completion_reason VARCHAR(16)',
        # One of the CMPL_REASON_XXX enumerated value strings
        'completion_msg LONGTEXT',
        # Why this job completed
        'results LONGTEXT DEFAULT NULL',
        # JSON encoded structure containing metrics produced by the model
        'optimized_metric FLOAT ',
        # Value of the particular metric we are optimizing in hypersearch
        'update_counter INT UNSIGNED DEFAULT 0',
        # incremented by engine every time the results is updated
        'num_records INT UNSIGNED DEFAULT 0',
        # number of records processed so far
        'start_time DATETIME DEFAULT NULL',
        # When this model started being evaluated
        'end_time DATETIME DEFAULT NULL',
        # When this model completed
        'cpu_time FLOAT DEFAULT 0',
        # How much actual CPU time was spent on this model, in seconds. This
        # excludes time the process spent sleeping, or otherwise not
        # actually executing code.
        'model_checkpoint_id LONGTEXT',
        # Checkpoint identifier for this model (after it has been saved)
        'gen_description LONGTEXT',
        # The contents of the generated description.py file from hypersearch
        # requests. This is generated by the Hypersearch workers and stored
        # here for reference, debugging, and development purposes.
        '_eng_params_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
        # MD5 hash of the params
        '_eng_particle_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
        # MD5 hash of the particle info for PSO algorithm
        '_eng_last_update_time DATETIME DEFAULT NULL',
        # time stamp of last update, used for detecting stalled workers
        '_eng_task_tracker_id TINYBLOB',
        # Hadoop Task Tracker ID
        '_eng_worker_id TINYBLOB',
        # Hadoop Map Task ID
        '_eng_attempt_id TINYBLOB',
        # Hadoop Map task attempt ID
        '_eng_worker_conn_id INT DEFAULT 0',
        # database client connection ID of the worker that is running this
        # model
        '_eng_milestones LONGTEXT',
        # A JSON encoded list of metric values for the model at each
        # milestone point
        '_eng_stop VARCHAR(16) DEFAULT NULL',
        # One of the STOP_REASON_XXX enumerated value strings. Set either by
        # the swarm terminator of either the current, or another
        # Hypersearch worker.
        '_eng_matured BOOLEAN DEFAULT FALSE',
        # Set by the model maturity-checker when it decides that this model
        # has "matured". This means that it has reached the point of
        # not getting better results with more data.
        'PRIMARY KEY (model_id)',
        'UNIQUE INDEX (job_id, _eng_params_hash)',
        'UNIQUE INDEX (job_id, _eng_particle_hash)',
        ]
      options = [
        'AUTO_INCREMENT=1000',
        ]
      query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
                (self.modelsTableName, ','.join(fields), ','.join(options))
      cursor.execute(query)
    # ---------------------------------------------------------------------
    # Get the field names for each table by introspecting the live schema
    cursor.execute('DESCRIBE %s' % (self.jobsTableName))
    fields = cursor.fetchall()
    self._jobs.dbFieldNames = [str(field[0]) for field in fields]
    cursor.execute('DESCRIBE %s' % (self.modelsTableName))
    fields = cursor.fetchall()
    self._models.dbFieldNames = [str(field[0]) for field in fields]
    # ---------------------------------------------------------------------
    # Generate the public (camelCase) names
    self._jobs.publicFieldNames = [self._columnNameDBToPublic(x)
                                   for x in self._jobs.dbFieldNames]
    self._models.publicFieldNames = [self._columnNameDBToPublic(x)
                                     for x in self._models.dbFieldNames]
    # ---------------------------------------------------------------------
    # Generate the name conversion dicts
    self._jobs.pubToDBNameDict = dict(
      zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames))
    self._jobs.dbToPubNameDict = dict(
      zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames))
    self._models.pubToDBNameDict = dict(
      zip(self._models.publicFieldNames, self._models.dbFieldNames))
    self._models.dbToPubNameDict = dict(
      zip(self._models.dbFieldNames, self._models.publicFieldNames))
    # ---------------------------------------------------------------------
    # Generate the dynamic namedtuple classes we use
    self._models.modelInfoNamedTuple = collections.namedtuple(
      '_modelInfoNamedTuple', self._models.publicFieldNames)
    self._jobs.jobInfoNamedTuple = collections.namedtuple(
      '_jobInfoNamedTuple', self._jobs.publicFieldNames)
    return
  def _getMatchingRowsNoRetries(self, tableInfo, conn, fieldsToMatch,
                                selectFieldNames, maxRows=None):
    """ Return a sequence of matching rows with the requested field values from
    a table or empty sequence if nothing matched.

    tableInfo:       Table information: a ClientJobsDAO._TableInfoBase instance
    conn:            Owned connection acquired from ConnectionFactory.get()
    fieldsToMatch:   Dictionary of internal fieldName/value mappings that
                     identify the desired rows. If a value is an instance of
                     ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
                     operator 'IN' will be used in the corresponding SQL
                     predicate; if the value is bool: "IS TRUE/FALSE"; if the
                     value is None: "IS NULL"; '=' will be used for all other
                     cases.
    selectFieldNames:
                     list of fields to return, using internal field names
    maxRows:         maximum number of rows to return; unlimited if maxRows
                     is None
    retval:          A sequence of matching rows, each row consisting of field
                     values in the order of the requested field names. Empty
                     sequence is returned when no match exists.
    """
    assert fieldsToMatch, repr(fieldsToMatch)
    # NOTE: iterkeys() -- this module targets Python 2
    assert all(k in tableInfo.dbFieldNames
               for k in fieldsToMatch.iterkeys()), repr(fieldsToMatch)
    assert selectFieldNames, repr(selectFieldNames)
    assert all(f in tableInfo.dbFieldNames for f in selectFieldNames), repr(
      selectFieldNames)
    # NOTE: make sure match expressions and values are in the same order
    matchPairs = fieldsToMatch.items()
    # Build one SQL predicate per (field, value) pair; bool and None are
    # rendered inline (IS TRUE/FALSE/NULL), all other values become
    # placeholders bound via sqlParams below -- the two lists must stay
    # aligned, hence the shared matchPairs ordering.
    matchExpressionGen = (
      p[0] +
      (' IS ' + {True:'TRUE', False:'FALSE'}[p[1]] if isinstance(p[1], bool)
       else ' IS NULL' if p[1] is None
       else ' IN %s' if isinstance(p[1], self._SEQUENCE_TYPES)
       else '=%s')
      for p in matchPairs)
    # Only non-bool, non-None values correspond to '%s' placeholders
    matchFieldValues = [p[1] for p in matchPairs
                        if (not isinstance(p[1], (bool)) and p[1] is not None)]
    query = 'SELECT %s FROM %s WHERE (%s)' % (
      ','.join(selectFieldNames), tableInfo.tableName,
      ' AND '.join(matchExpressionGen))
    sqlParams = matchFieldValues
    if maxRows is not None:
      query += ' LIMIT %s'
      sqlParams.append(maxRows)
    conn.cursor.execute(query, sqlParams)
    rows = conn.cursor.fetchall()
    if rows:
      # Sanity-check row count and column count against the request
      assert maxRows is None or len(rows) <= maxRows, "%d !<= %d" % (
        len(rows), maxRows)
      assert len(rows[0]) == len(selectFieldNames), "%d != %d" % (
        len(rows[0]), len(selectFieldNames))
    else:
      rows = tuple()
    return rows
@g_retrySQL
def _getMatchingRowsWithRetries(self, tableInfo, fieldsToMatch,
selectFieldNames, maxRows=None):
""" Like _getMatchingRowsNoRetries(), but with retries on transient MySQL
failures
"""
with ConnectionFactory.get() as conn:
return self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames, maxRows)
def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch,
selectFieldNames):
""" Return a single matching row with the requested field values from the
the requested table or None if nothing matched.
tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance
conn: Owned connection acquired from ConnectionFactory.get()
fieldsToMatch: Dictionary of internal fieldName/value mappings that
identify the desired rows. If a value is an instance of
ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
operator 'IN' will be used in the corresponding SQL
predicate; if the value is bool: "IS TRUE/FALSE"; if the
value is None: "IS NULL"; '=' will be used for all other
cases.
selectFieldNames:
list of fields to return, using internal field names
retval: A sequence of field values of the matching row in the order
of the given field names; or None if there was no match.
"""
rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames, maxRows=1)
if rows:
assert len(rows) == 1, repr(len(rows))
result = rows[0]
else:
result = None
return result
@g_retrySQL
def _getOneMatchingRowWithRetries(self, tableInfo, fieldsToMatch,
selectFieldNames):
""" Like _getOneMatchingRowNoRetries(), but with retries on transient MySQL
failures
"""
with ConnectionFactory.get() as conn:
return self._getOneMatchingRowNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames)
@classmethod
def _normalizeHash(cls, hashValue):
hashLen = len(hashValue)
if hashLen < cls.HASH_MAX_LEN:
hashValue += '\0' * (cls.HASH_MAX_LEN - hashLen)
else:
assert hashLen <= cls.HASH_MAX_LEN, (
"Hash is too long: hashLen=%r; hashValue=%r") % (hashLen, hashValue)
return hashValue
  def _insertOrGetUniqueJobNoRetries(
    self, conn, client, cmdLine, jobHash, clientInfo, clientKey, params,
    minimumWorkers, maximumWorkers, jobType, priority, alreadyRunning):
    """ Attempt to insert a row with the given parameters into the jobs table.
    Return jobID of the inserted row, or of an existing row with matching
    client/jobHash key.

    The combination of client and jobHash are expected to be unique (enforced
    by a unique index on the two columns).

    NOTE: It's possible that this or another process (on this or another
    machine) already inserted a row with matching client/jobHash key (e.g.,
    StreamMgr). This may also happen undetected by this function due to a
    partially-successful insert operation (e.g., row inserted, but then
    connection was lost while reading response) followed by retries either of
    this function or in SteadyDB module.

    Parameters:
    ----------------------------------------------------------------
    conn:            Owned connection acquired from ConnectionFactory.get()
    client:          Name of the client submitting the job
    cmdLine:         Command line to use to launch each worker process; must be
                     a non-empty string
    jobHash:         unique hash of this job. The caller must ensure that this,
                     together with client, uniquely identifies this job request
                     for the purposes of detecting duplicates.
    clientInfo:      JSON encoded dict of client specific information.
    clientKey:       Foreign key.
    params:          JSON encoded dict of the parameters for the job. This
                     can be fetched out of the database by the worker processes
                     based on the jobID.
    minimumWorkers:  minimum number of workers desired at a time.
    maximumWorkers:  maximum number of workers desired at a time.
    priority:        Job scheduling priority; 0 is the default priority (
                     ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
                     higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
                     and negative values are lower priority (down to
                     ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
                     be scheduled to run at the expense of the lower-priority
                     jobs, and higher-priority job tasks will preempt those
                     with lower priority if there is inadequate supply of
                     scheduling slots. Excess lower priority job tasks will
                     starve as long as slot demand exceeds supply. Most jobs
                     should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
                     that must run at all cost, such as Multi-Model-Master,
                     should be scheduled with MAX_JOB_PRIORITY.
    alreadyRunning:  Used for unit test purposes only. This inserts the job
                     in the running state. It is used when running a worker
                     in standalone mode without hadoop - it gives it a job
                     record to work with.
    retval:          jobID of the inserted jobs row, or of an existing jobs row
                     with matching client/jobHash key
    """
    assert len(client) <= self.CLIENT_MAX_LEN, "client too long:" + repr(client)
    assert cmdLine, "Unexpected empty or None command-line: " + repr(cmdLine)
    assert len(jobHash) == self.HASH_MAX_LEN, "wrong hash len=%d" % len(jobHash)
    # Initial status
    if alreadyRunning:
      # STATUS_TESTMODE, so that scheduler won't pick it up (for in-proc tests)
      initStatus = self.STATUS_TESTMODE
    else:
      initStatus = self.STATUS_NOTSTARTED
    # Create a new job entry. INSERT IGNORE makes a duplicate client/job_hash
    # a silent no-op (numRowsInserted == 0) instead of an error.
    query = 'INSERT IGNORE INTO %s (status, client, client_info, client_key,' \
            'cmd_line, params, job_hash, _eng_last_update_time, ' \
            'minimum_workers, maximum_workers, priority, _eng_job_type) ' \
            ' VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s, ' \
            ' UTC_TIMESTAMP(), %%s, %%s, %%s, %%s) ' \
            % (self.jobsTableName,)
    sqlParams = (initStatus, client, clientInfo, clientKey, cmdLine, params,
                 jobHash, minimumWorkers, maximumWorkers, priority, jobType)
    numRowsInserted = conn.cursor.execute(query, sqlParams)
    jobID = 0
    if numRowsInserted == 1:
      # Get the chosen job id
      # NOTE: LAST_INSERT_ID() returns 0 after intermittent connection failure
      conn.cursor.execute('SELECT LAST_INSERT_ID()')
      jobID = conn.cursor.fetchall()[0][0]
      if jobID == 0:
        self._logger.warn(
          '_insertOrGetUniqueJobNoRetries: SELECT LAST_INSERT_ID() returned 0; '
          'likely due to reconnection in SteadyDB following INSERT. '
          'jobType=%r; client=%r; clientInfo=%r; clientKey=%s; jobHash=%r; '
          'cmdLine=%r',
          jobType, client, _abbreviate(clientInfo, 32), clientKey, jobHash,
          cmdLine)
    else:
      # Assumption: nothing was inserted because this is a retry and the row
      # with this client/hash already exists from our prior
      # partially-successful attempt; or row with matching client/jobHash was
      # inserted already by some process on some machine.
      assert numRowsInserted == 0, repr(numRowsInserted)
    if jobID == 0:
      # Recover from intermittent failure in a partially-successful attempt;
      # or row with matching client/jobHash was already in table
      row = self._getOneMatchingRowNoRetries(
        self._jobs, conn, dict(client=client, job_hash=jobHash), ['job_id'])
      assert row is not None
      assert len(row) == 1, 'Unexpected num fields: ' + repr(len(row))
      jobID = row[0]
    # ---------------------------------------------------------------------
    # If asked to enter the job in the running state, set the connection id
    # and start time as well
    if alreadyRunning:
      query = 'UPDATE %s SET _eng_cjm_conn_id=%%s, ' \
              '          start_time=UTC_TIMESTAMP(), ' \
              '          _eng_last_update_time=UTC_TIMESTAMP() ' \
              '       WHERE job_id=%%s' \
              % (self.jobsTableName,)
      conn.cursor.execute(query, (self._connectionID, jobID))
    return jobID
  def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):
    """ Resumes processing of an existing job that is presently in the
    STATUS_COMPLETED state.

    NOTE: this is primarily for resuming suspended Production and Stream Jobs;
    DO NOT use it on Hypersearch jobs.

    This prepares an existing job entry to resume processing. The CJM is always
    periodically sweeping the jobs table and when it finds a job that is ready
    to run, it will proceed to start it up on Hadoop.

    Parameters:
    ----------------------------------------------------------------
    conn:            Owned connection acquired from ConnectionFactory.get()
    jobID:           jobID of the job to resume
    alreadyRunning:  Used for unit test purposes only. This inserts the job
                     in the running state. It is used when running a worker
                     in standalone mode without hadoop.
    retval:          nothing

    NOTE: unlike the docstring of the public jobResume(), an UPDATE that
    matches no rows here is logged and tolerated rather than raised, since
    concurrent resumers and post-failure retries are expected (see below).
    """
    # Initial status
    if alreadyRunning:
      # Use STATUS_TESTMODE so scheduler will leave our row alone
      initStatus = self.STATUS_TESTMODE
    else:
      initStatus = self.STATUS_NOTSTARTED
    # NOTE: some of our clients (e.g., StreamMgr) may call us (directly or
    #  indirectly) for the same job from different processes (even different
    #  machines), so we should be prepared for the update to fail; same holds
    #  if the UPDATE succeeds, but connection fails while reading result
    # Reset the per-run bookkeeping columns back to their schema DEFAULTs
    assignments = [
      'status=%s',
      'completion_reason=DEFAULT',
      'completion_msg=DEFAULT',
      'worker_completion_reason=DEFAULT',
      'worker_completion_msg=DEFAULT',
      'end_time=DEFAULT',
      'cancel=DEFAULT',
      '_eng_last_update_time=UTC_TIMESTAMP()',
      '_eng_allocate_new_workers=DEFAULT',
      '_eng_untended_dead_workers=DEFAULT',
      'num_failed_workers=DEFAULT',
      'last_failed_worker_error_msg=DEFAULT',
      '_eng_cleaning_status=DEFAULT',
      ]
    assignmentValues = [initStatus]
    if alreadyRunning:
      assignments += ['_eng_cjm_conn_id=%s', 'start_time=UTC_TIMESTAMP()',
                      '_eng_last_update_time=UTC_TIMESTAMP()']
      assignmentValues.append(self._connectionID)
    else:
      assignments += ['_eng_cjm_conn_id=DEFAULT', 'start_time=DEFAULT']
    assignments = ', '.join(assignments)
    # The status=STATUS_COMPLETED guard makes this UPDATE a no-op unless the
    # job is actually suspended
    query = 'UPDATE %s SET %s ' \
            '  WHERE job_id=%%s AND status=%%s' \
            % (self.jobsTableName, assignments)
    sqlParams = assignmentValues + [jobID, self.STATUS_COMPLETED]
    numRowsAffected = conn.cursor.execute(query, sqlParams)
    assert numRowsAffected <= 1, repr(numRowsAffected)
    if numRowsAffected == 0:
      self._logger.info(
        "_resumeJobNoRetries: Redundant job-resume UPDATE: job was not "
        "suspended or was resumed by another process or operation was retried "
        "after connection failure; jobID=%s", jobID)
    return
def getConnectionID(self):
""" Return our connection ID. This can be used for worker identification
purposes.
NOTE: the actual MySQL connection ID used in queries may change from time
to time if connection is re-acquired (e.g., upon MySQL server restart) or
when more than one entry from the connection pool has been used (e.g.,
multi-threaded apps)
"""
return self._connectionID
@logExceptions(_LOGGER)
def jobSuspend(self, jobID):
""" Requests a job to be suspended
NOTE: this is primarily for suspending Production Jobs; DO NOT use
it on Hypersearch jobs. For canceling any job type, use jobCancel() instead!
Parameters:
----------------------------------------------------------------
jobID: jobID of the job to resume
retval: nothing
"""
# TODO: validate that the job is in the appropriate state for being
# suspended: consider using a WHERE clause to make sure that
# the job is not already in the "completed" state
# TODO: when Nupic job control states get figured out, there may be a
# different way to suspend jobs ("cancel" doesn't make sense for this)
# NOTE: jobCancel() does retries on transient mysql failures
self.jobCancel(jobID)
return
  @logExceptions(_LOGGER)
  def jobResume(self, jobID, alreadyRunning=False):
    """ Resumes processing of an existing job that is presently in the
    STATUS_COMPLETED state.

    NOTE: this is primarily for resuming suspended Production Jobs; DO NOT use
    it on Hypersearch jobs.

    NOTE: The job MUST be in the STATUS_COMPLETED state at the time of this
    call, otherwise an exception will be raised.

    This prepares an existing job entry to resume processing. The CJM is always
    periodically sweeping the jobs table and when it finds a job that is ready
    to run, will proceed to start it up on Hadoop.

    Parameters:
    ----------------------------------------------------------------
    job:            jobID of the job to resume
    alreadyRunning: Used for unit test purposes only. This inserts the job
                    in the running state. It is used when running a worker
                    in standalone mode without hadoop.
    raises:         Throws a RuntimeError if no rows are affected. This could
                    either be because:
                      1) Because there was not matching jobID
                      2) or if the status of the job was not STATUS_COMPLETED.
    retval:         nothing
    """
    # Precondition check: suspended jobs are parked in the STATUS_COMPLETED
    # state, so anything else means the job was never suspended.
    row = self.jobGetFields(jobID, ['status'])
    (jobStatus,) = row
    if jobStatus != self.STATUS_COMPLETED:
      raise RuntimeError(("Failed to resume job: job was not suspended; "
                          "jobID=%s; job status=%r") % (jobID, jobStatus))
    # NOTE: on MySQL failures, we need to retry ConnectionFactory.get() as well
    # in order to recover from lost connections
    @g_retrySQL
    def resumeWithRetries():
      with ConnectionFactory.get() as conn:
        self._resumeJobNoRetries(conn, jobID, alreadyRunning)
    resumeWithRetries()
    return
  @logExceptions(_LOGGER)
  def jobInsert(self, client, cmdLine, clientInfo='', clientKey='', params='',
                alreadyRunning=False, minimumWorkers=0, maximumWorkers=0,
                jobType='', priority=DEFAULT_JOB_PRIORITY):
    """ Add an entry to the jobs table for a new job request. This is called by
    clients that wish to startup a new job, like a Hypersearch, stream job, or
    specific model evaluation from the engine.

    This puts a new entry into the jobs table. The CJM is always periodically
    sweeping the jobs table and when it finds a new job, will proceed to start
    it up on Hadoop.

    Parameters:
    ----------------------------------------------------------------
    client:         Name of the client submitting the job
    cmdLine:        Command line to use to launch each worker process; must be
                    a non-empty string
    clientInfo:     JSON encoded dict of client specific information.
    clientKey:      Foreign key.
    params:         JSON encoded dict of the parameters for the job. This
                    can be fetched out of the database by the worker processes
                    based on the jobID.
    alreadyRunning: Used for unit test purposes only. This inserts the job
                    in the running state. It is used when running a worker
                    in standalone mode without hadoop - it gives it a job
                    record to work with.
    minimumWorkers: minimum number of workers design at a time.
    maximumWorkers: maximum number of workers desired at a time.
    jobType:        The type of job that this is. This should be one of the
                    JOB_TYPE_XXXX enums. This is needed to allow a standard
                    way of recognizing a job's function and capabilities.
    priority:       Job scheduling priority; 0 is the default priority (
                    ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
                    higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
                    and negative values are lower priority (down to
                    ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
                    be scheduled to run at the expense of the lower-priority
                    jobs, and higher-priority job tasks will preempt those
                    with lower priority if there is inadequate supply of
                    scheduling slots. Excess lower priority job tasks will
                    starve as long as slot demand exceeds supply. Most jobs
                    should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
                    that must run at all cost, such as Multi-Model-Master,
                    should be scheduled with MAX_JOB_PRIORITY.
    retval:         jobID - unique ID assigned to this job
    """
    # Generate a fresh unique hash for this job; the (client, job_hash) pair
    # allows _insertOrGetUniqueJobNoRetries to detect a duplicate INSERT
    # attempt (e.g., a retry after a transient connection failure).
    jobHash = self._normalizeHash(uuid.uuid1().bytes)
    @g_retrySQL
    def insertWithRetries():
      with ConnectionFactory.get() as conn:
        return self._insertOrGetUniqueJobNoRetries(
          conn, client=client, cmdLine=cmdLine, jobHash=jobHash,
          clientInfo=clientInfo, clientKey=clientKey, params=params,
          minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,
          jobType=jobType, priority=priority, alreadyRunning=alreadyRunning)
    try:
      jobID = insertWithRetries()
    except:
      # NOTE(review): bare except is deliberate here — log the failure with
      # full context, then re-raise the original exception unchanged.
      self._logger.exception(
        'jobInsert FAILED: jobType=%r; client=%r; clientInfo=%r; clientKey=%r;'
        'jobHash=%r; cmdLine=%r',
        jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash,
        cmdLine)
      raise
    else:
      self._logger.info(
        'jobInsert: returning jobID=%s. jobType=%r; client=%r; clientInfo=%r; '
        'clientKey=%r; jobHash=%r; cmdLine=%r',
        jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey,
        jobHash, cmdLine)
    return jobID
  @logExceptions(_LOGGER)
  def jobInsertUnique(self, client, cmdLine, jobHash, clientInfo='',
                      clientKey='', params='', minimumWorkers=0,
                      maximumWorkers=0, jobType='',
                      priority=DEFAULT_JOB_PRIORITY):
    """ Add an entry to the jobs table for a new job request, but only if the
    same job, by the same client is not already running. If the job is already
    running, or queued up to run, this call does nothing. If the job does not
    exist in the jobs table or has completed, it will be inserted and/or started
    up again.

    This method is called by clients, like StreamMgr, that wish to only start up
    a job if it hasn't already been started up.

    Parameters:
    ----------------------------------------------------------------
    client:         Name of the client submitting the job
    cmdLine:        Command line to use to launch each worker process; must be
                    a non-empty string
    jobHash:        unique hash of this job. The client must insure that this
                    uniquely identifies this job request for the purposes
                    of detecting duplicates.
    clientInfo:     JSON encoded dict of client specific information.
    clientKey:      Foreign key.
    params:         JSON encoded dict of the parameters for the job. This
                    can be fetched out of the database by the worker processes
                    based on the jobID.
    minimumWorkers: minimum number of workers design at a time.
    maximumWorkers: maximum number of workers desired at a time.
    jobType:        The type of job that this is. This should be one of the
                    JOB_TYPE_XXXX enums. This is needed to allow a standard
                    way of recognizing a job's function and capabilities.
    priority:       Job scheduling priority; 0 is the default priority (
                    ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
                    higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
                    and negative values are lower priority (down to
                    ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
                    be scheduled to run at the expense of the lower-priority
                    jobs, and higher-priority job tasks will preempt those
                    with lower priority if there is inadequate supply of
                    scheduling slots. Excess lower priority job tasks will
                    starve as long as slot demand exceeds supply. Most jobs
                    should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
                    that must run at all cost, such as Multi-Model-Master,
                    should be scheduled with MAX_JOB_PRIORITY.
    retval:         jobID of the newly inserted or existing job.
    """
    assert cmdLine, "Unexpected empty or None command-line: " + repr(cmdLine)
    @g_retrySQL
    def insertUniqueWithRetries():
      jobHashValue = self._normalizeHash(jobHash)
      jobID = None
      with ConnectionFactory.get() as conn:
        # Look for an existing job with the same (client, job_hash) identity
        row = self._getOneMatchingRowNoRetries(
          self._jobs, conn, dict(client=client, job_hash=jobHashValue),
          ['job_id', 'status'])
        if row is not None:
          (jobID, status) = row
          if status == self.STATUS_COMPLETED:
            # Restart existing job that had completed
            query = 'UPDATE %s SET client_info=%%s, ' \
                    '              client_key=%%s, ' \
                    '              cmd_line=%%s, ' \
                    '              params=%%s, ' \
                    '              minimum_workers=%%s, ' \
                    '              maximum_workers=%%s, ' \
                    '              priority=%%s, '\
                    '              _eng_job_type=%%s ' \
                    '       WHERE (job_id=%%s AND status=%%s)' \
                    % (self.jobsTableName,)
            sqlParams = (clientInfo, clientKey, cmdLine, params,
                         minimumWorkers, maximumWorkers, priority,
                         jobType, jobID, self.STATUS_COMPLETED)
            # Qualifying on status=STATUS_COMPLETED makes this UPDATE a no-op
            # if another process restarted the job first.
            numRowsUpdated = conn.cursor.execute(query, sqlParams)
            assert numRowsUpdated <= 1, repr(numRowsUpdated)
            if numRowsUpdated == 0:
              self._logger.info(
                "jobInsertUnique: Redundant job-reuse UPDATE: job restarted by "
                "another process, values were unchanged, or operation was "
                "retried after connection failure; jobID=%s", jobID)
            # Restart the job, unless another process beats us to it
            self._resumeJobNoRetries(conn, jobID, alreadyRunning=False)
        else:
          # There was no job row with matching client/jobHash, so insert one
          jobID = self._insertOrGetUniqueJobNoRetries(
            conn, client=client, cmdLine=cmdLine, jobHash=jobHashValue,
            clientInfo=clientInfo, clientKey=clientKey, params=params,
            minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,
            jobType=jobType, priority=priority, alreadyRunning=False)
      return jobID
    try:
      jobID = insertUniqueWithRetries()
    except:
      # NOTE(review): bare except is deliberate — log with context, re-raise.
      self._logger.exception(
        'jobInsertUnique FAILED: jobType=%r; client=%r; '
        'clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r',
        jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash,
        cmdLine)
      raise
    else:
      self._logger.info(
        'jobInsertUnique: returning jobID=%s. jobType=%r; client=%r; '
        'clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r',
        jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey,
        jobHash, cmdLine)
    return jobID
  @g_retrySQL
  def _startJobWithRetries(self, jobID):
    """ Place the given job in STATUS_RUNNING mode; the job is expected to be
    STATUS_NOTSTARTED.

    NOTE: this function was factored out of jobStartNext because it's also
    needed for testing (e.g., test_client_jobs_dao.py)

    Parameters:
    ----------------------------------------------------------------
    jobID:          jobID of the job to transition to STATUS_RUNNING
    """
    with ConnectionFactory.get() as conn:
      # Qualify the UPDATE on status=STATUS_NOTSTARTED so that a retried
      # statement (after a transient connection failure) or a concurrent
      # scheduler cannot double-start the job.
      query = 'UPDATE %s SET status=%%s, ' \
              '              _eng_cjm_conn_id=%%s, ' \
              '              start_time=UTC_TIMESTAMP(), ' \
              '              _eng_last_update_time=UTC_TIMESTAMP() ' \
              '       WHERE (job_id=%%s AND status=%%s)' \
              % (self.jobsTableName,)
      sqlParams = [self.STATUS_RUNNING, self._connectionID,
                   jobID, self.STATUS_NOTSTARTED]
      numRowsUpdated = conn.cursor.execute(query, sqlParams)
      if numRowsUpdated != 1:
        # Zero rows is expected after a retried operation; warn, don't raise.
        self._logger.warn('jobStartNext: numRowsUpdated=%r instead of 1; '
                          'likely side-effect of transient connection '
                          'failure', numRowsUpdated)
    return
@logExceptions(_LOGGER)
def jobStartNext(self):
""" For use only by Nupic Scheduler (also known as ClientJobManager) Look
through the jobs table and see if any new job requests have been
queued up. If so, pick one and mark it as starting up and create the
model table to hold the results
Parameters:
----------------------------------------------------------------
retval: jobID of the job we are starting up, if found; None if not found
"""
# NOTE: cursor.execute('SELECT @update_id') trick is unreliable: if a
# connection loss occurs during cursor.execute, then the server-cached
# information is lost, and we cannot get the updated job ID; so, we use
# this select instead
row = self._getOneMatchingRowWithRetries(
self._jobs, dict(status=self.STATUS_NOTSTARTED), ['job_id'])
if row is None:
return None
(jobID,) = row
self._startJobWithRetries(jobID)
return jobID
@logExceptions(_LOGGER)
@g_retrySQL
def jobReactivateRunningJobs(self):
""" Look through the jobs table and reactivate all that are already in the
running state by setting their _eng_allocate_new_workers fields to True;
used by Nupic Scheduler as part of its failure-recovery procedure.
"""
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET _eng_cjm_conn_id=%%s, ' \
' _eng_allocate_new_workers=TRUE ' \
' WHERE status=%%s ' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self._connectionID, self.STATUS_RUNNING])
return
@logExceptions(_LOGGER)
def jobGetDemand(self,):
""" Look through the jobs table and get the demand - minimum and maximum
number of workers requested, if new workers are to be allocated, if there
are any untended dead workers, for all running jobs.
Parameters:
----------------------------------------------------------------
retval: list of ClientJobsDAO._jobs.jobDemandNamedTuple nametuples
containing the demand - min and max workers,
allocate_new_workers, untended_dead_workers, num_failed_workers
for each running (STATUS_RUNNING) job. Empty list when there
isn't any demand.
"""
rows = self._getMatchingRowsWithRetries(
self._jobs, dict(status=self.STATUS_RUNNING),
[self._jobs.pubToDBNameDict[f]
for f in self._jobs.jobDemandNamedTuple._fields])
return [self._jobs.jobDemandNamedTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def jobCancelAllRunningJobs(self):
""" Set cancel field of all currently-running jobs to true.
"""
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET cancel=TRUE WHERE status<>%%s ' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
return
@logExceptions(_LOGGER)
@g_retrySQL
def jobCountCancellingJobs(self,):
""" Look through the jobs table and count the running jobs whose
cancel field is true.
Parameters:
----------------------------------------------------------------
retval: A count of running jobs with the cancel field set to true.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT COUNT(job_id) '\
'FROM %s ' \
'WHERE (status<>%%s AND cancel is TRUE)' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return rows[0][0]
@logExceptions(_LOGGER)
@g_retrySQL
def jobGetCancellingJobs(self,):
""" Look through the jobs table and get the list of running jobs whose
cancel field is true.
Parameters:
----------------------------------------------------------------
retval: A (possibly empty) sequence of running job IDs with cancel field
set to true
"""
with ConnectionFactory.get() as conn:
query = 'SELECT job_id '\
'FROM %s ' \
'WHERE (status<>%%s AND cancel is TRUE)' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return tuple(r[0] for r in rows)
@staticmethod
@logExceptions(_LOGGER)
def partitionAtIntervals(data, intervals):
""" Generator to allow iterating slices at dynamic intervals
Parameters:
----------------------------------------------------------------
data: Any data structure that supports slicing (i.e. list or tuple)
*intervals: Iterable of intervals. The sum of intervals should be less
than, or equal to the length of data.
"""
assert sum(intervals) <= len(data)
start = 0
for interval in intervals:
end = start + interval
yield data[start:end]
start = end
raise StopIteration
@staticmethod
@logExceptions(_LOGGER)
def _combineResults(result, *namedTuples):
""" Return a list of namedtuples from the result of a join query. A
single database result is partitioned at intervals corresponding to the
fields in namedTuples. The return value is the result of applying
namedtuple._make() to each of the partitions, for each of the namedTuples.
Parameters:
----------------------------------------------------------------
result: Tuple representing a single result from a database query
*namedTuples: List of named tuples.
"""
results = ClientJobsDAO.partitionAtIntervals(
result, [len(nt._fields) for nt in namedTuples])
return [nt._make(result) for nt, result in zip(namedTuples, results)]
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobInfoWithModels(self, jobID):
    """ Get all info about a job, with model details, if available.

    Parameters:
    ----------------------------------------------------------------
    job:    jobID of the job to query
    retval: A sequence of two-tuples if the jobID exists in the jobs
            table (exeption is raised if it doesn't exist). Each two-tuple
            contains an instance of jobInfoNamedTuple as the first element and
            an instance of modelInfoNamedTuple as the second element. NOTE: In
            the case where there are no matching model rows, a sequence of one
            two-tuple will still be returned, but the modelInfoNamedTuple
            fields will be None, and the jobInfoNamedTuple fields will be
            populated.

    raises: RuntimeError if jobID has no row in the jobs table
    """
    # Get a database connection and cursor
    combinedResults = None
    with ConnectionFactory.get() as conn:
      # NOTE: Since we're using a LEFT JOIN on the models table, there need not
      # be a matching row in the models table, but the matching row from the
      # jobs table will still be returned (along with all fields from the models
      # table with values of None in case there were no matchings models)
      query = ' '.join([
        'SELECT %s.*, %s.*' % (self.jobsTableName, self.modelsTableName),
        'FROM %s' % self.jobsTableName,
        'LEFT JOIN %s USING(job_id)' % self.modelsTableName,
        'WHERE job_id=%s'])
      conn.cursor.execute(query, (jobID,))
      if conn.cursor.rowcount > 0:
        # Each flat joined row is split back into (jobInfo, modelInfo) pairs
        combinedResults = [
          ClientJobsDAO._combineResults(
            result, self._jobs.jobInfoNamedTuple,
            self._models.modelInfoNamedTuple
          ) for result in conn.cursor.fetchall()]
    if combinedResults is not None:
      return combinedResults
    raise RuntimeError("jobID=%s not found within the jobs table" % (jobID))
@logExceptions(_LOGGER)
def jobInfo(self, jobID):
""" Get all info about a job
Parameters:
----------------------------------------------------------------
job: jobID of the job to query
retval: namedtuple containing the job info.
"""
row = self._getOneMatchingRowWithRetries(
self._jobs, dict(job_id=jobID),
[self._jobs.pubToDBNameDict[n]
for n in self._jobs.jobInfoNamedTuple._fields])
if row is None:
raise RuntimeError("jobID=%s not found within the jobs table" % (jobID))
# Create a namedtuple with the names to values
return self._jobs.jobInfoNamedTuple._make(row)
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobSetStatus(self, jobID, status, useConnectionID=True,):
    """ Change the status on the given job

    Parameters:
    ----------------------------------------------------------------
    job:        jobID of the job to change status
    status:     new status string (ClientJobsDAO.STATUS_xxxxx)

    useConnectionID: True if the connection id of the calling function
                must be the same as the connection that created the job. Set
                to False for hypersearch workers

    raises:     RuntimeError if the UPDATE affected no row (unknown jobID, or
                the job is owned by a different CJM connection)
    """
    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
      query = 'UPDATE %s SET status=%%s, ' \
              '              _eng_last_update_time=UTC_TIMESTAMP() ' \
              '       WHERE job_id=%%s' \
              % (self.jobsTableName,)
      sqlParams = [status, jobID]
      if useConnectionID:
        # Restrict the UPDATE to jobs owned by this scheduler's connection
        query += ' AND _eng_cjm_conn_id=%s'
        sqlParams.append(self._connectionID)
      result = conn.cursor.execute(query, sqlParams)
      if result != 1:
        raise RuntimeError("Tried to change the status of job %d to %s, but "
                           "this job belongs to some other CJM" % (
                             jobID, status))
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobSetCompleted(self, jobID, completionReason, completionMsg,
                      useConnectionID = True):
    """ Change the status on the given job to completed

    Parameters:
    ----------------------------------------------------------------
    job:              jobID of the job to mark as completed
    completionReason: completionReason string
    completionMsg:    completionMsg string

    useConnectionID: True if the connection id of the calling function
                must be the same as the connection that created the job. Set
                to False for hypersearch workers

    raises:     RuntimeError if the UPDATE affected no row (unknown jobID, or
                the job is owned by a different CJM connection)
    """
    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
      query = 'UPDATE %s SET status=%%s, ' \
              '              completion_reason=%%s, ' \
              '              completion_msg=%%s, ' \
              '              end_time=UTC_TIMESTAMP(), ' \
              '              _eng_last_update_time=UTC_TIMESTAMP() ' \
              '       WHERE job_id=%%s' \
              % (self.jobsTableName,)
      sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
                   jobID]
      if useConnectionID:
        # Restrict the UPDATE to jobs owned by this scheduler's connection
        query += ' AND _eng_cjm_conn_id=%s'
        sqlParams.append(self._connectionID)
      result = conn.cursor.execute(query, sqlParams)
      if result != 1:
        raise RuntimeError("Tried to change the status of jobID=%s to "
                           "completed, but this job could not be found or "
                           "belongs to some other CJM" % (jobID))
@logExceptions(_LOGGER)
def jobCancel(self, jobID):
""" Cancel the given job. This will update the cancel field in the
jobs table and will result in the job being cancelled.
Parameters:
----------------------------------------------------------------
jobID: jobID of the job to mark as completed
to False for hypersearch workers
"""
self._logger.info('Canceling jobID=%s', jobID)
# NOTE: jobSetFields does retries on transient mysql failures
self.jobSetFields(jobID, {"cancel" : True}, useConnectionID=False)
@logExceptions(_LOGGER)
def jobGetModelIDs(self, jobID):
"""Fetch all the modelIDs that correspond to a given jobID; empty sequence
if none"""
rows = self._getMatchingRowsWithRetries(self._models, dict(job_id=jobID),
['model_id'])
return [r[0] for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def getActiveJobCountForClientInfo(self, clientInfo):
""" Return the number of jobs for the given clientInfo and a status that is
not completed.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT count(job_id) ' \
'FROM %s ' \
'WHERE client_info = %%s ' \
' AND status != %%s' % self.jobsTableName
conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])
activeJobCount = conn.cursor.fetchone()[0]
return activeJobCount
@logExceptions(_LOGGER)
@g_retrySQL
def getActiveJobCountForClientKey(self, clientKey):
""" Return the number of jobs for the given clientKey and a status that is
not completed.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT count(job_id) ' \
'FROM %s ' \
'WHERE client_key = %%s ' \
' AND status != %%s' % self.jobsTableName
conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])
activeJobCount = conn.cursor.fetchone()[0]
return activeJobCount
@logExceptions(_LOGGER)
@g_retrySQL
def getActiveJobsForClientInfo(self, clientInfo, fields=[]):
""" Fetch jobIDs for jobs in the table with optional fields given a
specific clientInfo """
# Form the sequence of field name strings that will go into the
# request
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(['job_id'] + dbFields)
with ConnectionFactory.get() as conn:
query = 'SELECT %s FROM %s ' \
'WHERE client_info = %%s ' \
' AND status != %%s' % (dbFieldsStr, self.jobsTableName)
conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return rows
@logExceptions(_LOGGER)
@g_retrySQL
def getActiveJobsForClientKey(self, clientKey, fields=[]):
""" Fetch jobIDs for jobs in the table with optional fields given a
specific clientKey """
# Form the sequence of field name strings that will go into the
# request
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(['job_id'] + dbFields)
with ConnectionFactory.get() as conn:
query = 'SELECT %s FROM %s ' \
'WHERE client_key = %%s ' \
' AND status != %%s' % (dbFieldsStr, self.jobsTableName)
conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return rows
@logExceptions(_LOGGER)
@g_retrySQL
def getJobs(self, fields=[]):
""" Fetch jobIDs for jobs in the table with optional fields """
# Form the sequence of field name strings that will go into the
# request
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(['job_id'] + dbFields)
with ConnectionFactory.get() as conn:
query = 'SELECT %s FROM %s' % (dbFieldsStr, self.jobsTableName)
conn.cursor.execute(query)
rows = conn.cursor.fetchall()
return rows
@logExceptions(_LOGGER)
@g_retrySQL
def getFieldsForActiveJobsOfType(self, jobType, fields=[]):
""" Helper function for querying the models table including relevant job
info where the job type matches the specified jobType. Only records for
which there is a matching jobId in both tables is returned, and only the
requested fields are returned in each result, assuming that there is not
a conflict. This function is useful, for example, in querying a cluster
for a list of actively running production models (according to the state
of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
enumerations.
Parameters:
----------------------------------------------------------------
jobType: jobType enum
fields: list of fields to return
Returns: List of tuples containing the jobId and requested field values
"""
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(['job_id'] + dbFields)
with ConnectionFactory.get() as conn:
query = \
'SELECT DISTINCT %s ' \
'FROM %s j ' \
'LEFT JOIN %s m USING(job_id) '\
'WHERE j.status != %%s ' \
'AND _eng_job_type = %%s' % (dbFieldsStr, self.jobsTableName,
self.modelsTableName)
conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType])
return conn.cursor.fetchall()
@logExceptions(_LOGGER)
def jobGetFields(self, jobID, fields):
""" Fetch the values of 1 or more fields from a job record. Here, 'fields'
is a list with the names of the fields to fetch. The names are the public
names of the fields (camelBack, not the lower_case_only form as stored in
the DB).
Parameters:
----------------------------------------------------------------
jobID: jobID of the job record
fields: list of fields to return
Returns: A sequence of field values in the same order as the requested
field list -> [field1, field2, ...]
"""
# NOTE: jobsGetFields retries on transient mysql failures
return self.jobsGetFields([jobID], fields, requireAll=True)[0][1]
@logExceptions(_LOGGER)
def jobsGetFields(self, jobIDs, fields, requireAll=True):
""" Fetch the values of 1 or more fields from a sequence of job records.
Here, 'fields' is a sequence (list or tuple) with the names of the fields to
fetch. The names are the public names of the fields (camelBack, not the
lower_case_only form as stored in the DB).
WARNING!!!: The order of the results are NOT necessarily in the same order as
the order of the job IDs passed in!!!
Parameters:
----------------------------------------------------------------
jobIDs: A sequence of jobIDs
fields: A list of fields to return for each jobID
Returns: A list of tuples->(jobID, [field1, field2,...])
"""
assert isinstance(jobIDs, self._SEQUENCE_TYPES)
assert len(jobIDs) >=1
rows = self._getMatchingRowsWithRetries(
self._jobs, dict(job_id=jobIDs),
['job_id'] + [self._jobs.pubToDBNameDict[x] for x in fields])
if requireAll and len(rows) < len(jobIDs):
# NOTE: this will also trigger if the jobIDs list included duplicates
raise RuntimeError("jobIDs %s not found within the jobs table" % (
(set(jobIDs) - set(r[0] for r in rows)),))
return [(r[0], list(r[1:])) for r in rows]
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobSetFields(self, jobID, fields, useConnectionID=True,
                   ignoreUnchanged=False):
    """ Change the values of 1 or more fields in a job. Here, 'fields' is a
    dict with the name/value pairs to change. The names are the public names of
    the fields (camelBack, not the lower_case_only form as stored in the DB).

    This method is for private use by the ClientJobManager only.

    Parameters:
    ----------------------------------------------------------------
    jobID:     jobID of the job record

    fields:    dictionary of fields to change

    useConnectionID: True if the connection id of the calling function
               must be the same as the connection that created the job. Set
               to False for hypersearch workers

    ignoreUnchanged: The default behavior is to throw a
               RuntimeError if no rows are affected. This could either be
               because:
                 1) Because there was not matching jobID
                 2) or if the data to update matched the data in the DB exactly.

               Set this parameter to True if you expect case 2 and wish to
               supress the error.
    """
    # Form the sequence of key=value strings that will go into the
    # request
    # NOTE: assignmentExpressions and assignmentValues rely on consistent dict
    # iteration order between iterkeys() and values(), which Python guarantees
    # for an unmutated dict.
    assignmentExpressions = ','.join(
      ["%s=%%s" % (self._jobs.pubToDBNameDict[f],) for f in fields.iterkeys()])
    assignmentValues = fields.values()
    query = 'UPDATE %s SET %s ' \
            '       WHERE job_id=%%s' \
            % (self.jobsTableName, assignmentExpressions,)
    sqlParams = assignmentValues + [jobID]
    if useConnectionID:
      # Restrict the UPDATE to jobs owned by this scheduler's connection
      query += ' AND _eng_cjm_conn_id=%s'
      sqlParams.append(self._connectionID)
    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
      result = conn.cursor.execute(query, sqlParams)
      if result != 1 and not ignoreUnchanged:
        raise RuntimeError(
          "Tried to change fields (%r) of jobID=%s conn_id=%r), but an error " \
          "occurred. result=%r; query=%r" % (
            assignmentExpressions, jobID, self._connectionID, result, query))
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobSetFieldIfEqual(self, jobID, fieldName, newValue, curValue):
    """ Change the value of 1 field in a job to 'newValue', but only if the
    current value matches 'curValue'. The 'fieldName' is the public name of
    the field (camelBack, not the lower_case_only form as stored in the DB).

    This method is used for example by HypersearcWorkers to update the
    engWorkerState field periodically. By qualifying on curValue, it insures
    that only 1 worker at a time is elected to perform the next scheduled
    periodic sweep of the models.

    Parameters:
    ----------------------------------------------------------------
    jobID:        jobID of the job record to modify
    fieldName:    public field name of the field
    newValue:     new value of the field to set
    curValue:     current value to qualify against

    retval:       True if we successfully modified the field
                  False if curValue did not match
    """
    # Get the private field name and string form of the value
    dbFieldName = self._jobs.pubToDBNameDict[fieldName]
    # Build the WHERE qualification; booleans and NULL need SQL-specific
    # comparison operators (IS) rather than the parameterized '=' form.
    conditionValue = []
    if isinstance(curValue, bool):
      conditionExpression = '%s IS %s' % (
        dbFieldName, {True:'TRUE', False:'FALSE'}[curValue])
    elif curValue is None:
      conditionExpression = '%s is NULL' % (dbFieldName,)
    else:
      conditionExpression = '%s=%%s' % (dbFieldName,)
      conditionValue.append(curValue)
    query = 'UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), %s=%%s ' \
            '       WHERE job_id=%%s AND %s' \
            % (self.jobsTableName, dbFieldName, conditionExpression)
    sqlParams = [newValue, jobID] + conditionValue
    with ConnectionFactory.get() as conn:
      result = conn.cursor.execute(query, sqlParams)
      # One affected row means the compare-and-set succeeded
      return (result == 1)
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobIncrementIntField(self, jobID, fieldName, increment=1,
                           useConnectionID=False):
    """ Incremet the value of 1 field in a job by increment. The 'fieldName' is
    the public name of the field (camelBack, not the lower_case_only form as
    stored in the DB).

    This method is used for example by HypersearcWorkers to update the
    engWorkerState field periodically. The increment is applied atomically
    within the UPDATE statement itself (SET field=field+increment), so
    concurrent workers do not lose updates.

    Parameters:
    ----------------------------------------------------------------
    jobID:        jobID of the job record to modify
    fieldName:    public field name of the field
    increment:    increment is added to the current value of the field

    useConnectionID: True to additionally require that the job is owned by
                  this DAO's connection

    raises:       RuntimeError if the UPDATE affected no row
    """
    # Get the private field name and string form of the value
    dbFieldName = self._jobs.pubToDBNameDict[fieldName]
    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
      query = 'UPDATE %s SET %s=%s+%%s ' \
              '       WHERE job_id=%%s' \
              % (self.jobsTableName, dbFieldName, dbFieldName)
      sqlParams = [increment, jobID]
      if useConnectionID:
        query += ' AND _eng_cjm_conn_id=%s'
        sqlParams.append(self._connectionID)
      result = conn.cursor.execute(query, sqlParams)
      if result != 1:
        raise RuntimeError(
          "Tried to increment the field (%r) of jobID=%s (conn_id=%r), but an " \
          "error occurred. result=%r; query=%r" % (
            dbFieldName, jobID, self._connectionID, result, query))
@logExceptions(_LOGGER)
@g_retrySQL
def jobUpdateResults(self, jobID, results):
""" Update the results string and last-update-time fields of a model.
Parameters:
----------------------------------------------------------------
jobID: job ID of model to modify
results: new results (json dict string)
"""
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), ' \
' results=%%s ' \
' WHERE job_id=%%s' % (self.jobsTableName,)
conn.cursor.execute(query, [results, jobID])
@logExceptions(_LOGGER)
@g_retrySQL
def modelsClearAll(self):
""" Delete all models from the models table
Parameters:
----------------------------------------------------------------
"""
self._logger.info('Deleting all rows from models table %r',
self.modelsTableName)
with ConnectionFactory.get() as conn:
query = 'DELETE FROM %s' % (self.modelsTableName)
conn.cursor.execute(query)
@logExceptions(_LOGGER)
def modelInsertAndStart(self, jobID, params, paramsHash, particleHash=None):
  """ Insert a new unique model (based on params) into the model table in the
  "running" state. This will return two things: whether or not the model was
  actually inserted (i.e. that set of params isn't already in the table) and
  the modelID chosen for that set of params. Even if the model was not
  inserted by this call (it was already there) the modelID of the one already
  inserted is returned.

  Parameters:
  ----------------------------------------------------------------
  jobID: jobID of the job to add models for
  params: params for this model
  paramsHash hash of the params, generated by the worker
  particleHash hash of the particle info (for PSO). If not provided,
                then paramsHash will be used.

  retval: (modelID, wasInserted)
              modelID: the model ID for this set of params
              wasInserted: True if this call ended up inserting the
              new model. False if this set of params was already in
              the model table.
  """
  # Fill in default particleHash
  if particleHash is None:
    particleHash = paramsHash

  # Normalize hashes
  paramsHash = self._normalizeHash(paramsHash)
  particleHash = self._normalizeHash(particleHash)

  # Helper: look up a model with this exact (jobID, paramsHash, particleHash)
  # combination on an already-open connection; returns
  # (model_id, _eng_worker_conn_id) or None.
  def findExactMatchNoRetries(conn):
    return self._getOneMatchingRowNoRetries(
      self._models, conn,
      {'job_id':jobID, '_eng_params_hash':paramsHash,
       '_eng_particle_hash':particleHash},
      ['model_id', '_eng_worker_conn_id'])

  # Same lookup, but on a fresh connection and wrapped with SQL retry logic
  @g_retrySQL
  def findExactMatchWithRetries():
    with ConnectionFactory.get() as conn:
      return findExactMatchNoRetries(conn)

  # Check if the model is already in the models table
  #
  # NOTE: with retries of mysql transient failures, we can't always tell
  #  whether the row was already inserted (e.g., comms failure could occur
  #  after insertion into table, but before arrival or response), so the
  #  need to check before attempting to insert a new row
  #
  # TODO: if we could be assured that the caller already verified the
  #  model's absence before calling us, we could skip this check here
  row = findExactMatchWithRetries()
  if row is not None:
    # Already present and not inserted by us
    return (row[0], False)

  @g_retrySQL
  def insertModelWithRetries():
    """ NOTE: it's possible that another process on some machine is attempting
    to insert the same model at the same time as the caller """
    with ConnectionFactory.get() as conn:
      # Create a new job entry
      query = 'INSERT INTO %s (job_id, params, status, _eng_params_hash, ' \
              ' _eng_particle_hash, start_time, _eng_last_update_time, ' \
              ' _eng_worker_conn_id) ' \
              ' VALUES (%%s, %%s, %%s, %%s, %%s, UTC_TIMESTAMP(), ' \
              ' UTC_TIMESTAMP(), %%s) ' \
              % (self.modelsTableName,)
      sqlParams = (jobID, params, self.STATUS_RUNNING, paramsHash,
                   particleHash, self._connectionID)
      try:
        numRowsAffected = conn.cursor.execute(query, sqlParams)
      except Exception, e:
        # NOTE: We have seen instances where some package in the calling
        #  chain tries to interpret the exception message using unicode.
        #  Since the exception message contains binary data (the hashes), this
        #  can in turn generate a Unicode translation exception. So, we catch
        #  ALL exceptions here and look for the string "Duplicate entry" in
        #  the exception args just in case this happens. For example, the
        #  Unicode exception we might get is:
        #   (<type 'exceptions.UnicodeDecodeError'>, UnicodeDecodeError('utf8', "Duplicate entry '1000-?.\x18\xb1\xd3\xe0CO\x05\x8b\xf80\xd7E5\xbb' for key 'job_id'", 25, 26, 'invalid start byte'))
        #
        #  If it weren't for this possible Unicode translation error, we
        #  could watch for only the exceptions we want, like this:
        #   except pymysql.IntegrityError, e:
        #     if e.args[0] != mysqlerrors.DUP_ENTRY:
        #       raise
        if "Duplicate entry" not in str(e):
          raise

        # NOTE: duplicate entry scenario: however, we can't discern
        #  whether it was inserted by another process or this one, because an
        #  intermittent failure may have caused us to retry
        self._logger.info('Model insert attempt failed with DUP_ENTRY: '
                          'jobID=%s; paramsHash=%s OR particleHash=%s; %r',
                          jobID, paramsHash.encode('hex'),
                          particleHash.encode('hex'), e)
      else:
        if numRowsAffected == 1:
          # NOTE: SELECT LAST_INSERT_ID() returns 0 after re-connection
          conn.cursor.execute('SELECT LAST_INSERT_ID()')
          modelID = conn.cursor.fetchall()[0][0]
          if modelID != 0:
            # Clean insert by this worker: return the fresh id
            return (modelID, True)
          else:
            # LAST_INSERT_ID of 0 implies the connection was re-established
            # between INSERT and SELECT; fall through to the lookup below
            self._logger.warn(
              'SELECT LAST_INSERT_ID for model returned 0, implying loss of '
              'connection: jobID=%s; paramsHash=%r; particleHash=%r',
              jobID, paramsHash, particleHash)
        else:
          self._logger.error(
            'Attempt to insert model resulted in unexpected numRowsAffected: '
            'expected 1, but got %r; jobID=%s; paramsHash=%r; '
            'particleHash=%r',
            numRowsAffected, jobID, paramsHash, particleHash)

      # Look up the model and discern whether it is tagged with our conn id
      row = findExactMatchNoRetries(conn)
      if row is not None:
        (modelID, connectionID) = row
        # It is "ours" only if the stored worker conn id matches this worker's
        return (modelID, connectionID == self._connectionID)

      # This set of params is already in the table, just get the modelID
      query = 'SELECT (model_id) FROM %s ' \
              ' WHERE job_id=%%s AND ' \
              ' (_eng_params_hash=%%s ' \
              ' OR _eng_particle_hash=%%s) ' \
              ' LIMIT 1 ' \
              % (self.modelsTableName,)
      sqlParams = [jobID, paramsHash, particleHash]
      numRowsFound = conn.cursor.execute(query, sqlParams)
      assert numRowsFound == 1, (
        'Model not found: jobID=%s AND (paramsHash=%r OR particleHash=%r); '
        'numRowsFound=%r') % (jobID, paramsHash, particleHash, numRowsFound)
      (modelID,) = conn.cursor.fetchall()[0]
      return (modelID, False)

  return insertModelWithRetries()
@logExceptions(_LOGGER)
def modelsInfo(self, modelIDs):
  """ Fetch every stored field for each of the given models.

  WARNING!!!: The order of the results are NOT necessarily in the same order as
  the order of the model IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  modelIDs:   list of model IDs
  retval:     list of nametuples containing all the fields stored for each
                model.
  """
  assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
    "wrong modelIDs type: %s") % (type(modelIDs),)
  assert modelIDs, "modelIDs is empty"

  # Translate the namedtuple's public field names to DB column names
  infoTuple = self._models.modelInfoNamedTuple
  dbColumns = [self._models.pubToDBNameDict[f] for f in infoTuple._fields]

  rows = self._getMatchingRowsWithRetries(
    self._models, dict(model_id=modelIDs), dbColumns)
  results = [infoTuple._make(r) for r in rows]

  # NOTE: this assertion will also fail if modelIDs contains duplicates
  assert len(results) == len(modelIDs), "modelIDs not found: %s" % (
    set(modelIDs) - set(r.modelId for r in results))

  return results
@logExceptions(_LOGGER)
def modelsGetFields(self, modelIDs, fields):
  """ Fetch the values of 1 or more fields from a sequence of model records.
  Here, 'fields' is a list with the names of the fields to fetch. The names
  are the public names of the fields (camelBack, not the lower_case_only form
  as stored in the DB).

  WARNING!!!: The order of the results are NOT necessarily in the same order
  as the order of the model IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  modelIDs:      A single modelID or sequence of modelIDs
  fields:        A list of fields to return

  Returns:  If modelIDs is a sequence:
              a list of tuples->(modelID, [field1, field2,...])
            If modelIDs is a single modelID:
              a list of field values->[field1, field2,...]
  """
  assert len(fields) >= 1, 'fields is empty'

  # Accept either a lone modelID or a sequence of them; remember which form
  # the caller used so the return shape matches.
  singleID = not isinstance(modelIDs, self._SEQUENCE_TYPES)
  if singleID:
    modelIDs = [modelIDs]
  else:
    assert len(modelIDs) >=1, 'modelIDs is empty'

  # model_id leads the column list so each row can be keyed by model
  dbColumns = ['model_id'] + [self._models.pubToDBNameDict[f] for f in fields]
  rows = self._getMatchingRowsWithRetries(
    self._models, dict(model_id=modelIDs), dbColumns)

  if len(rows) < len(modelIDs):
    raise RuntimeError("modelIDs not found within the models table: %s" % (
      (set(modelIDs) - set(r[0] for r in rows)),))

  if singleID:
    # Single-model form: strip the leading model_id and return the values
    return list(rows[0][1:])

  return [(r[0], list(r[1:])) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def modelsGetFieldsForJob(self, jobID, fields, ignoreKilled=False):
  """ Gets the specified fields for all the models for a single job. This is
  similar to modelsGetFields

  Parameters:
  ----------------------------------------------------------------
  jobID:              jobID for the models to be searched
  fields:             A list of fields to return
  ignoreKilled:       (True/False). If True, this will ignore models that
                      have been killed

  Returns: a (possibly empty) list of tuples as follows
    [
      (model_id1, [field1, ..., fieldn]),
      (model_id2, [field1, ..., fieldn]),
      (model_id3, [field1, ..., fieldn])
                  ...
    ]

  NOTE: since there is a window of time between a job getting inserted into
   jobs table and the job's worker(s) starting up and creating models, an
   empty-list result is one of the normal outcomes.
  """
  assert len(fields) >= 1, 'fields is empty'

  # Form the sequence of field name strings that will go into the
  # request
  dbFields = [self._models.pubToDBNameDict[x] for x in fields]
  dbFieldsStr = ','.join(dbFields)

  query = 'SELECT model_id, %s FROM %s ' \
          ' WHERE job_id=%%s ' \
          % (dbFieldsStr, self.modelsTableName)
  sqlParams = [jobID]

  if ignoreKilled:
    query += ' AND (completion_reason IS NULL OR completion_reason != %s)'
    sqlParams.append(self.CMPL_REASON_KILLED)

  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    conn.cursor.execute(query, sqlParams)
    rows = conn.cursor.fetchall()

  if rows is None:
    # fetchall is defined to return a (possibly-empty) sequence of
    # sequences; however, we occasionally see None returned and don't know
    # why...
    self._logger.error("Unexpected None result from cursor.fetchall; "
                       "query=%r; Traceback=%r",
                       query, traceback.format_exc())
    # BUGFIX: previously this fell through and crashed with a TypeError
    # while iterating None; treat the anomalous None result like
    # "no models found", which the docstring documents as a normal outcome.
    return []

  return [(r[0], list(r[1:])) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def modelsGetFieldsForCheckpointed(self, jobID, fields):
  """ Fetch the given fields from every model in a job that has a checkpoint
  (model_checkpoint_id set). Used to decide whether a new model should be
  checkpointed.

  Parameters:
  -----------------------------------------------------------------------
  jobID:        The jobID for the models to be searched
  fields:       A list of fields to return

  Returns: a (possibly-empty) list of tuples as follows
    [
      (model_id1, [field1, ..., fieldn]),
      (model_id2, [field1, ..., fieldn]),
      (model_id3, [field1, ..., fieldn])
                  ...
    ]
  """
  assert len(fields) >= 1, "fields is empty"

  # Map public field names to DB column names for the SELECT list
  dbColumnNames = [self._models.pubToDBNameDict[f] for f in fields]
  selectQuery = ('SELECT model_id, {fields} from {models}'
                 ' WHERE job_id=%s AND model_checkpoint_id IS NOT NULL'.format(
                   fields=", ".join(dbColumnNames),
                   models=self.modelsTableName))

  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    conn.cursor.execute(selectQuery, [jobID])
    checkpointedRows = conn.cursor.fetchall()

  return [(r[0], list(r[1:])) for r in checkpointedRows]
@logExceptions(_LOGGER)
@g_retrySQL
def modelSetFields(self, modelID, fields, ignoreUnchanged = False):
  """ Change the values of 1 or more fields in a model. Here, 'fields' is a
  dict with the name/value pairs to change. The names are the public names of
  the fields (camelBack, not the lower_case_only form as stored in the DB).

  Parameters:
  ----------------------------------------------------------------
  modelID:         model ID of the model record
  fields:          dictionary of fields to change
  ignoreUnchanged: The default behavior is to throw a
                   RuntimeError if no rows are affected. This could either be
                   because:
                     1) Because there was no matching modelID
                     2) or if the data to update matched the data in the DB
                        exactly.
                   Set this parameter to True if you expect case 2 and wish
                   to supress the error.
  """
  # Build the SET clauses and their parameter values in a single pass so the
  # two sequences are guaranteed to stay paired up
  setClauses = []
  setValues = []
  for pubName in fields:
    setClauses.append('%s=%%s' % (self._models.pubToDBNameDict[pubName],))
    setValues.append(fields[pubName])

  query = 'UPDATE %s SET %s, update_counter = update_counter+1 ' \
          ' WHERE model_id=%%s' \
          % (self.modelsTableName, ','.join(setClauses))
  sqlParams = setValues + [modelID]

  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    numAffectedRows = conn.cursor.execute(query, sqlParams)
    self._logger.debug("Executed: numAffectedRows=%r, query=%r, sqlParams=%r",
                       numAffectedRows, query, sqlParams)

  if numAffectedRows != 1 and not ignoreUnchanged:
    raise RuntimeError(
      ("Tried to change fields (%r) of model %r (conn_id=%r), but an error "
       "occurred. numAffectedRows=%r; query=%r; sqlParams=%r") % (
        fields, modelID, self._connectionID, numAffectedRows, query,
        sqlParams,))
@logExceptions(_LOGGER)
def modelsGetParams(self, modelIDs):
  """ Get the params and paramsHash for a set of models.

  WARNING!!!: The order of the results are NOT necessarily in the same order as
  the order of the model IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  modelIDs:   list of model IDs
  retval:     list of result namedtuples defined in
                ClientJobsDAO._models.getParamsNamedTuple. Each tuple
                contains: (modelId, params, engParamsHash)
  """
  assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
    "Wrong modelIDs type: %r") % (type(modelIDs),)
  assert len(modelIDs) >= 1, "modelIDs is empty"

  paramsTuple = self._models.getParamsNamedTuple
  rows = self._getMatchingRowsWithRetries(
    self._models, {'model_id' : modelIDs},
    [self._models.pubToDBNameDict[f] for f in paramsTuple._fields])

  # NOTE: assertion will also fail when modelIDs contains duplicates
  assert len(rows) == len(modelIDs), "Didn't find modelIDs: %r" % (
    (set(modelIDs) - set(r[0] for r in rows)),)

  # Wrap each raw row in the params namedtuple
  return [paramsTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
def modelsGetResultAndStatus(self, modelIDs):
  """ Get the results string and other status fields for a set of models.

  WARNING!!!: The order of the results are NOT necessarily in the same order
  as the order of the model IDs passed in!!!

  For each model, this returns a tuple containing:
   (modelID, results, status, updateCounter, numRecords, completionReason,
     completionMsg, engParamsHash

  Parameters:
  ----------------------------------------------------------------
  modelIDs:    list of model IDs
  retval:      list of result tuples. Each tuple contains:
                  (modelID, results, status, updateCounter, numRecords,
                    completionReason, completionMsg, engParamsHash)
  """
  assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
    "Wrong modelIDs type: %r") % type(modelIDs)
  assert len(modelIDs) >= 1, "modelIDs is empty"

  statusTuple = self._models.getResultAndStatusNamedTuple
  rows = self._getMatchingRowsWithRetries(
    self._models, {'model_id' : modelIDs},
    [self._models.pubToDBNameDict[f] for f in statusTuple._fields])

  # NOTE: assertion will also fail when modelIDs contains duplicates
  assert len(rows) == len(modelIDs), "Didn't find modelIDs: %r" % (
    (set(modelIDs) - set(r[0] for r in rows)),)

  # Return the results as a list of namedtuples
  return [statusTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
def modelsGetUpdateCounters(self, jobID):
  """ Return info on all of the models that are in already in the models
  table for a given job. For each model, this returns a tuple
  containing: (modelID, updateCounter).

  Note that we don't return the results for all models, since the results
  string could be quite large. The information we are returning is
  just 2 integer fields.

  Parameters:
  ----------------------------------------------------------------
  jobID:      jobID to query
  retval:     (possibly empty) list of tuples. Each tuple contains:
                (modelID, updateCounter)
  """
  counterTuple = self._models.getUpdateCountersNamedTuple
  dbColumns = [self._models.pubToDBNameDict[f] for f in counterTuple._fields]

  # Match on job_id; an empty result is perfectly normal here
  rows = self._getMatchingRowsWithRetries(
    self._models, {'job_id' : jobID}, dbColumns)

  return [counterTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def modelUpdateResults(self, modelID, results=None, metricValue =None,
                       numRecords=None):
  """ Update the results string, and/or num_records fields of
  a model. This will fail if the model does not currently belong to this
  client (connection_id doesn't match).

  Parameters:
  ----------------------------------------------------------------
  modelID:      model ID of model to modify
  results:      new results, or None to ignore
  metricValue:  the value of the metric being optimized, or None to ignore
  numRecords:   new numRecords, or None to ignore
  """
  # Always bump the timestamp and update counter; other SET clauses are
  # appended only for the values the caller supplied
  setExpressions = ['_eng_last_update_time=UTC_TIMESTAMP()',
                    'update_counter=update_counter+1']
  setValues = []

  if results is not None:
    setExpressions.append('results=%s')
    setValues.append(results)

  if numRecords is not None:
    setExpressions.append('num_records=%s')
    setValues.append(numRecords)

  # Skip the metric when it is None or NaN: (x == x) is False only for NaN.
  # metricValue is being passed as numpy.float64.
  if metricValue is not None and (metricValue==metricValue):
    setExpressions.append('optimized_metric=%s')
    setValues.append(float(metricValue))

  query = 'UPDATE %s SET %s ' \
          ' WHERE model_id=%%s and _eng_worker_conn_id=%%s' \
          % (self.modelsTableName, ','.join(setExpressions))
  sqlParams = setValues + [modelID, self._connectionID]

  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    numRowsAffected = conn.cursor.execute(query, sqlParams)

  if numRowsAffected != 1:
    raise InvalidConnectionException(
      ("Tried to update the info of modelID=%r using connectionID=%r, but "
       "this model belongs to some other worker or modelID not found; "
       "numRowsAffected=%r") % (modelID,self._connectionID, numRowsAffected,))
def modelUpdateTimestamp(self, modelID):
  """ Refresh a model's _eng_last_update_time (and bump its update_counter)
  without changing results, numRecords, or the optimized metric; simply
  delegates to modelUpdateResults with all optional args left at None.

  Parameters:
  ----------------------------------------------------------------
  modelID:      model ID of model to touch
  """
  self.modelUpdateResults(modelID)
@logExceptions(_LOGGER)
@g_retrySQL
def modelSetCompleted(self, modelID, completionReason, completionMsg,
                      cpuTime=0, useConnectionID=True):
  """ Mark a model as completed, with the given completionReason and
  completionMsg. This will fail if the model does not currently belong to this
  client (connection_id doesn't match).

  Parameters:
  ----------------------------------------------------------------
  modelID:          model ID of model to modify
  completionReason: completionReason string
  completionMsg:    completionMsg string
  cpuTime:          amount of CPU time spent on this model
  useConnectionID:  True if the connection id of the calling function
                    must be the same as the connection that created the
                    job. Set to True for hypersearch workers, which use
                    this mechanism for orphaned model detection.
  """
  # A NULL message is stored as the empty string
  completionMsg = '' if completionMsg is None else completionMsg

  query = 'UPDATE %s SET status=%%s, ' \
          ' completion_reason=%%s, ' \
          ' completion_msg=%%s, ' \
          ' end_time=UTC_TIMESTAMP(), ' \
          ' cpu_time=%%s, ' \
          ' _eng_last_update_time=UTC_TIMESTAMP(), ' \
          ' update_counter=update_counter+1 ' \
          ' WHERE model_id=%%s' \
          % (self.modelsTableName,)
  sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
               cpuTime, modelID]

  if useConnectionID:
    # Restrict the UPDATE to rows owned by this worker's connection; this is
    # how hypersearch workers avoid clobbering a model adopted by another
    query += " AND _eng_worker_conn_id=%s"
    sqlParams.append(self._connectionID)

  with ConnectionFactory.get() as conn:
    numRowsAffected = conn.cursor.execute(query, sqlParams)

  if numRowsAffected != 1:
    raise InvalidConnectionException(
      ("Tried to set modelID=%r using connectionID=%r, but this model "
       "belongs to some other worker or modelID not found; "
       "numRowsAffected=%r") % (modelID, self._connectionID, numRowsAffected))
@logExceptions(_LOGGER)
def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):
  """ Look through the models table for an orphaned model, which is a model
  that is not completed yet, whose _eng_last_update_time is more than
  maxUpdateInterval seconds ago.

  If one is found, change its _eng_worker_conn_id to the current worker's
  and return the model id.

  Parameters:
  ----------------------------------------------------------------
  retval:  modelId of the model we adopted, or None if none found
  """
  # Find one running model in this job that hasn't been updated within
  # maxUpdateInterval seconds; returns its id, or None
  @g_retrySQL
  def findCandidateModelWithRetries():
    modelID = None
    with ConnectionFactory.get() as conn:
      # TODO: may need a table index on job_id/status for speed
      query = 'SELECT model_id FROM %s ' \
              ' WHERE status=%%s ' \
              ' AND job_id=%%s ' \
              ' AND TIMESTAMPDIFF(SECOND, ' \
              ' _eng_last_update_time, ' \
              ' UTC_TIMESTAMP()) > %%s ' \
              ' LIMIT 1 ' \
              % (self.modelsTableName,)
      sqlParams = [self.STATUS_RUNNING, jobId, maxUpdateInterval]
      numRows = conn.cursor.execute(query, sqlParams)
      rows = conn.cursor.fetchall()

    assert numRows <= 1, "Unexpected numRows: %r" % numRows
    if numRows == 1:
      (modelID,) = rows[0]

    return modelID

  # Try to claim the candidate by stamping our connection id on it; the
  # staleness condition is re-checked in the UPDATE so a concurrently-updated
  # model is not stolen.  Returns True if we now own the model.
  @g_retrySQL
  def adoptModelWithRetries(modelID):
    adopted = False
    with ConnectionFactory.get() as conn:
      query = 'UPDATE %s SET _eng_worker_conn_id=%%s, ' \
              ' _eng_last_update_time=UTC_TIMESTAMP() ' \
              ' WHERE model_id=%%s ' \
              ' AND status=%%s' \
              ' AND TIMESTAMPDIFF(SECOND, ' \
              ' _eng_last_update_time, ' \
              ' UTC_TIMESTAMP()) > %%s ' \
              ' LIMIT 1 ' \
              % (self.modelsTableName,)
      sqlParams = [self._connectionID, modelID, self.STATUS_RUNNING,
                   maxUpdateInterval]
      numRowsAffected = conn.cursor.execute(query, sqlParams)

      assert numRowsAffected <= 1, 'Unexpected numRowsAffected=%r' % (
        numRowsAffected,)

      if numRowsAffected == 1:
        adopted = True
      else:
        # Discern between transient failure during update and someone else
        # claiming this model
        (status, connectionID) = self._getOneMatchingRowNoRetries(
          self._models, conn, {'model_id':modelID},
          ['status', '_eng_worker_conn_id'])
        # A retried UPDATE may have succeeded before the failure was reported;
        # if the row is still running and carries our conn id, it's ours
        adopted = (status == self.STATUS_RUNNING and
                   connectionID == self._connectionID)
    return adopted


  # Loop: keep picking candidates until one is successfully claimed (another
  # worker may grab a candidate between our SELECT and UPDATE) or none remain
  adoptedModelID = None
  while True:
    modelID = findCandidateModelWithRetries()
    if modelID is None:
      break
    if adoptModelWithRetries(modelID):
      adoptedModelID = modelID
      break

  return adoptedModelID
#def testClientJobsDAO():
# # WARNING: these tests assume that Nupic Scheduler is not running, and bad
# # things will happen if the test is executed while the Scheduler is running
#
# # TODO: This test code is out of date: e.g., at the time of this writing,
# # jobStartNext() advances a job's status to STATUS_RUNNING instead of
# # STATUS_STARTING; etc.
#
# import time
# import hashlib
# import pprint
#
# # Clear out the database
# cjDAO = ClientJobsDAO.get()
# cjDAO.connect(deleteOldVersions=True, recreate=True)
#
#
# # --------------------------------------------------------------------
# # Test inserting a new job that doesn't have to be unique
# jobID1 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params')
# print "Inserted job %d" % (jobID1)
#
# jobID2 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params')
# print "Inserted job %d" % (jobID2)
#
#
# # --------------------------------------------------------------------
# # Test starting up those jobs
# jobID = cjDAO.jobStartNext()
# print "started job %d" % (jobID)
# assert (jobID == jobID1)
# info = cjDAO.jobInfo(jobID)
# print "jobInfo:"
# pprint.pprint(info)
# assert (info.status == cjDAO.STATUS_STARTING)
#
# jobID = cjDAO.jobStartNext()
# print "started job %d" % (jobID)
# assert (jobID == jobID2)
# info = cjDAO.jobInfo(jobID)
# print "jobInfo:"
# pprint.pprint(info)
# assert (info.status == cjDAO.STATUS_STARTING)
#
#
# # --------------------------------------------------------------------
# # Test inserting a unique job
# jobHash = '01234'
# (success, jobID3) = cjDAO.jobInsertUnique(client='testuniq',
# cmdLine='echo hi',
# jobHash=jobHash, clientInfo='client info', params='job params')
# print "Inserted unique job %d" % (jobID3)
# assert (success)
#
# # This should return the same jobID
# (success, jobID4) = cjDAO.jobInsertUnique(client='testuniq',
# cmdLine='echo hi',
# jobHash=jobHash, clientInfo='client info', params='job params')
# print "tried to insert again %d" % (jobID4)
# assert (not success and jobID4 == jobID3)
#
#
# # Mark it as completed
# jobID = cjDAO.jobStartNext()
# assert (jobID == jobID3)
# cjDAO.jobSetStatus(jobID3, cjDAO.STATUS_COMPLETED)
#
#
# # This should return success
# (success, jobID4) = cjDAO.jobInsertUnique(client='testuniq',
# cmdLine='echo hi',
# jobHash=jobHash, clientInfo='client info', params='job params')
# print "Inserted unique job %d" % (jobID4)
# assert (success)
#
#
# # --------------------------------------------------------------------
# # Test inserting a pre-started job
# jobID5 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params',
# alreadyRunning=True)
# print "Inserted prestarted job %d" % (jobID5)
#
# info = cjDAO.jobInfo(jobID5)
# print "jobInfo:"
# pprint.pprint(info)
# assert (info.status == cjDAO.STATUS_TESTMODE)
#
#
#
# # --------------------------------------------------------------------
# # Test the jobInfo and jobSetFields calls
# jobInfo = cjDAO.jobInfo(jobID2)
# print "job info:"
# pprint.pprint(jobInfo)
# newFields = dict(maximumWorkers=43)
# cjDAO.jobSetFields(jobID2, newFields)
# jobInfo = cjDAO.jobInfo(jobID2)
# assert(jobInfo.maximumWorkers == newFields['maximumWorkers'])
#
#
# # --------------------------------------------------------------------
# # Test the jobGetFields call
# values = cjDAO.jobGetFields(jobID2, ['maximumWorkers'])
# assert (values[0] == newFields['maximumWorkers'])
#
#
# # --------------------------------------------------------------------
# # Test the jobSetFieldIfEqual call
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == None)
#
# # Change from None to test
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue='test', curValue=None)
# assert (success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == 'test')
#
# # Change from test1 to test2 (should fail)
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue='test2', curValue='test1')
# assert (not success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == 'test')
#
# # Change from test to test2
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue='test2', curValue='test')
# assert (success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == 'test2')
#
# # Change from test2 to None
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue=None, curValue='test2')
# assert (success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == None)
#
#
# # --------------------------------------------------------------------
# # Test job demands
# jobID6 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params',
# minimumWorkers=1, maximumWorkers=1,
# alreadyRunning=False)
# jobID7 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params',
# minimumWorkers=4, maximumWorkers=10,
# alreadyRunning=False)
# cjDAO.jobSetStatus(jobID6, ClientJobsDAO.STATUS_RUNNING,
# useConnectionID=False,)
# cjDAO.jobSetStatus(jobID7, ClientJobsDAO.STATUS_RUNNING,
# useConnectionID=False,)
# jobsDemand = cjDAO.jobGetDemand()
# assert (jobsDemand[0].minimumWorkers==1 and jobsDemand[0].maximumWorkers==1)
# assert (jobsDemand[1].minimumWorkers==4 and jobsDemand[1].maximumWorkers==10)
# assert (jobsDemand[0].engAllocateNewWorkers == True and \
# jobsDemand[0].engUntendedDeadWorkers == False)
#
# # Test increment field
# values = cjDAO.jobGetFields(jobID7, ['numFailedWorkers'])
# assert (values[0] == 0)
# cjDAO.jobIncrementIntField(jobID7, 'numFailedWorkers', 1)
# values = cjDAO.jobGetFields(jobID7, ['numFailedWorkers'])
# assert (values[0] == 1)
#
# # --------------------------------------------------------------------
# # Test inserting new models
#
# params = "params1"
# hash1 = hashlib.md5(params).digest()
# (modelID1, ours) = cjDAO.modelInsertAndStart(jobID, params, hash1)
# print "insert %s,%s:" % (params, hash1.encode('hex')), modelID1, ours
# assert (ours)
#
# params = "params2"
# hash2 = hashlib.md5(params).digest()
# (modelID2, ours) = cjDAO.modelInsertAndStart(jobID, params, hash2)
# print "insert %s,%s:" % (params, hash2.encode('hex')), modelID2, ours
# assert (ours)
#
# params = "params3"
# hash3 = hashlib.md5(params).digest()
# (modelID3, ours) = cjDAO.modelInsertAndStart(jobID, params, hash3)
# print "insert %s,%s:" % (params, hash3.encode('hex')), modelID3, ours
# assert (ours)
#
# params = "params4"
# hash4 = hashlib.md5(params).digest()
# (modelID4, ours) = cjDAO.modelInsertAndStart(jobID, params, hash4)
# print "insert %s,%s:" % (params, hash4.encode('hex')), modelID4, ours
# assert (ours)
#
# params = "params5"
# hash5 = hashlib.md5(params).digest()
# (modelID5, ours) = cjDAO.modelInsertAndStart(jobID, params, hash5)
# print "insert %s,%s:" % (params, hash5.encode('hex')), modelID5, ours
# assert (ours)
#
#
# # Try to insert the same model again
# params = "params2"
# hash = hashlib.md5(params).digest()
# (modelID, ours) = cjDAO.modelInsertAndStart(jobID, params, hash)
# print "insert %s,%s:" % (params, hash.encode('hex')), modelID, ours
# assert (not ours and modelID == modelID2)
#
#
# # ---------------------------------------------------------------
# # Test inserting models with unique particle hashes
# params = "params6"
# paramsHash = hashlib.md5(params).digest()
# particle = "particle6"
# particleHash = hashlib.md5(particle).digest()
# (modelID6, ours) = cjDAO.modelInsertAndStart(jobID, params, paramsHash,
# particleHash)
# print "insert %s,%s,%s:" % (params, paramsHash.encode('hex'),
# particleHash.encode('hex')), modelID6, ours
# assert (ours)
#
# # Should fail if we insert with the same params hash
# params = "params6"
# paramsHash = hashlib.md5(params).digest()
# particle = "particleUnique"
# particleHash = hashlib.md5(particle).digest()
# (modelID, ours) = cjDAO.modelInsertAndStart(jobID, params, paramsHash,
# particleHash)
# print "insert %s,%s,%s:" % (params, paramsHash.encode('hex'),
# particleHash.encode('hex')), modelID6, ours
# assert (not ours and modelID == modelID6)
#
# # Should fail if we insert with the same particle hash
# params = "paramsUnique"
# paramsHash = hashlib.md5(params).digest()
# particle = "particle6"
# particleHash = hashlib.md5(particle).digest()
# (modelID, ours) = cjDAO.modelInsertAndStart(jobID, params, paramsHash,
# particleHash)
# print "insert %s,%s,%s:" % (params, paramsHash.encode('hex'),
# particleHash.encode('hex')), modelID6, ours
# assert (not ours and modelID == modelID6)
#
#
#
# # --------------------------------------------------------------------
# # Test getting params for a set of models
# paramsAndHash = cjDAO.modelsGetParams([modelID1, modelID2])
# print "modelID, params, paramsHash of %s:" % ([modelID1, modelID2])
# for (modelID, params, hash) in paramsAndHash:
# print " ", modelID, params, hash.encode('hex')
# if modelID == modelID1:
# assert (params == "params1" and hash == hash1)
# elif modelID == modelID2:
# assert (params == "params2" and hash == hash2)
# else:
#      assert (False)
#
#
# # Set some to notstarted
# #cjDAO.modelUpdateStatus(modelID2, status=cjDAO.STATUS_NOTSTARTED)
# #cjDAO.modelUpdateStatus(modelID3, status=cjDAO.STATUS_NOTSTARTED)
#
#
# # --------------------------------------------------------------------
# # Test Update model info
# cjDAO.modelUpdateResults(modelID2, results="hi there")
# cjDAO.modelUpdateResults(modelID3, numRecords=100)
# cjDAO.modelUpdateResults(modelID3, numRecords=110)
# cjDAO.modelUpdateResults(modelID4, results="bye", numRecords=42)
# cjDAO.modelUpdateResults(modelID5, results="hello", numRecords=4)
#
#
# # Test setCompleted
# cjDAO.modelSetCompleted(modelID5, completionReason=cjDAO.CMPL_REASON_EOF,
# completionMsg="completion message")
#
# # --------------------------------------------------------------------------
# # Test the GetResultsAndStatus call
# results = cjDAO.modelsGetResultAndStatus([modelID1, modelID2, modelID3,
# modelID4, modelID5])
# assert (len(results) == 5)
# for (modelID, results, status, updateCounter, numRecords,
# completionReason, completionMsg, engParamsHash,
# engMatured) in results:
# if modelID == modelID1:
# assert (status == cjDAO.STATUS_RUNNING)
# assert (updateCounter == 0)
# elif modelID == modelID2:
# assert (results == 'hi there')
# assert (updateCounter == 1)
# elif modelID == modelID3:
# assert (numRecords == 110)
# assert (updateCounter == 2)
# elif modelID == modelID4:
# assert (updateCounter == 1)
# assert (results == 'bye')
# assert (numRecords == 42)
# elif modelID == modelID5:
# assert (updateCounter == 2)
# assert (results == 'hello')
# assert (numRecords == 4)
# assert (status == cjDAO.STATUS_COMPLETED)
# assert (completionReason == cjDAO.CMPL_REASON_EOF)
# assert (completionMsg == "completion message")
# else:
# assert (False)
#
# # --------------------------------------------------------------------------
# # Test the ModelsInfo call
# mInfos = cjDAO.modelsInfo([modelID1, modelID2, modelID3,
# modelID4, modelID5])
# assert (len(results) == 5)
# for info in mInfos:
# modelID = info.modelId
# if modelID == modelID1:
# assert (info.status == cjDAO.STATUS_RUNNING)
# assert (info.updateCounter == 0)
# elif modelID == modelID2:
# assert (info.results == 'hi there')
# assert (info.updateCounter == 1)
# elif modelID == modelID3:
# assert (info.numRecords == 110)
# assert (info.updateCounter == 2)
# elif modelID == modelID4:
# assert (info.updateCounter == 1)
# assert (info.results == 'bye')
# assert (info.numRecords == 42)
# elif modelID == modelID5:
# assert (info.updateCounter == 2)
# assert (info.results == 'hello')
# assert (info.numRecords == 4)
# assert (info.status == cjDAO.STATUS_COMPLETED)
# assert (info.completionReason == cjDAO.CMPL_REASON_EOF)
# assert (info.completionMsg == "completion message")
# else:
# assert (False)
#
#
# # Test the GetUpdateCounters call
# results = cjDAO.modelsGetUpdateCounters(jobID)
# print " all models update counters:", results
# expResults = set(((modelID1, 0), (modelID2, 1), (modelID3, 2),
# (modelID4, 1), (modelID5, 2), (modelID6, 0)))
# diff = expResults.symmetric_difference(results)
# assert (len(diff) == 0)
#
#
# # -------------------------------------------------------------------
# # Test the model orphan logic
# for modelID in [modelID1, modelID2, modelID3, modelID4, modelID5, modelID6]:
# cjDAO.modelUpdateResults(modelID, results="hi there")
# orphanedModel = cjDAO.modelAdoptNextOrphan(jobID, maxUpdateInterval=10.0)
# if orphanedModel is not None:
# print "Unexpected orphan: ", orphanedModel
# assert (orphanedModel is None)
# print "Waiting 2 seconds for model to expire..."
# time.sleep(2)
# orphanedModel = cjDAO.modelAdoptNextOrphan(jobID, maxUpdateInterval=1.0)
# assert (orphanedModel is not None)
# print "Adopted model", orphanedModel
#
# print "\nAll tests pass."
helpString = \
"""%prog [options]
This script runs the ClientJobsDAO as a command line tool, for executing
unit tests or for obtaining specific information about the ClientJobsDAO
required for code written in languages other than python.
"""
if __name__ == "__main__":
"""
Launch the ClientJobsDAO from the command line. This can be done to obtain
specific information about the ClientJobsDAO when languages other than python
(i.e. Java) are used.
"""
# Parse command line options
parser = OptionParser(helpString)
parser.add_option("--getDBName", action="store_true", default=False,
help="Print the name of the database that will be used to stdout "
" [default: %default]")
(options, args) = parser.parse_args(sys.argv[1:])
if len(args) > 0:
parser.error("Didn't expect any arguments.")
# Print DB name?
if options.getDBName:
cjDAO = ClientJobsDAO()
print cjDAO.dbName
| 135,759 | Python | .py | 2,740 | 41.866788 | 199 | 0.625453 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,036 | __init__.py | numenta_nupic-legacy/src/nupic/database/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,037 | connection.py | numenta_nupic-legacy/src/nupic/database/connection.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import logging
import platform
import traceback
from DBUtils import SteadyDB
from DBUtils.PooledDB import PooledDB
import pymysql
from nupic.support.configuration import Configuration
_MODULE_NAME = "nupic.database.Connection"
g_max_concurrency = None
g_max_concurrency_raise_exception = False
""" This flag controls a diagnostic feature for debugging unexpected concurrency
in acquiring ConnectionWrapper instances.
The value None (default) disables this feature.
enableConcurrencyChecks() and disableConcurrencyChecks() are the public API
functions for controlling this diagnostic feature.
When g_max_concurrency is exceeded, this module will log useful info (backtraces
of concurrent connection acquisitions). If g_max_concurrency_raise_exception is
true, it will also raise ConcurrencyExceededError with helpful information.
"""
class ConcurrencyExceededError(Exception):
""" This exception is raised when g_max_concurrency is exceeded """
pass
def enableConcurrencyChecks(maxConcurrency, raiseException=True):
""" Enable the diagnostic feature for debugging unexpected concurrency in
acquiring ConnectionWrapper instances.
NOTE: This MUST be done early in your application's execution, BEFORE any
accesses to ConnectionFactory or connection policies from your application
(including imports and sub-imports of your app).
Parameters:
----------------------------------------------------------------
maxConcurrency: A non-negative integer that represents the maximum expected
number of outstanding connections. When this value is
exceeded, useful information will be logged and, depending
on the value of the raiseException arg,
ConcurrencyExceededError may be raised.
raiseException: If true, ConcurrencyExceededError will be raised when
maxConcurrency is exceeded.
"""
global g_max_concurrency, g_max_concurrency_raise_exception
assert maxConcurrency >= 0
g_max_concurrency = maxConcurrency
g_max_concurrency_raise_exception = raiseException
return
def disableConcurrencyChecks():
global g_max_concurrency, g_max_concurrency_raise_exception
g_max_concurrency = None
g_max_concurrency_raise_exception = False
return
class ConnectionFactory(object):
""" Database connection factory.
WARNING: Minimize the scope of connection ownership to cover
only the execution of SQL statements in order to avoid creating multiple
outstanding SQL connections in gevent-based apps (e.g.,
ProductionWorker) when polling code that calls timer.sleep()
executes in the scope of an outstanding SQL connection, allowing a
context switch to another greenlet that may also acquire an SQL connection.
This is highly undesirable because SQL/RDS servers allow a limited number
of connections. So, release connections before calling into any other code.
Since connections are pooled by default, the overhead of calling
ConnectionFactory.get() is insignificant.
Usage Examples:
# Add Context Manager (with ...) support for Jython/Python 2.5.x, if needed
from __future__ import with_statement
example1 (preferred):
with ConnectionFactory.get() as conn:
conn.cursor.execute("SELECT ...")
example2 (if 'with' statement can't be used for some reason):
conn = ConnectionFactory.get()
try:
conn.cursor.execute("SELECT ...")
finally:
conn.release()
"""
@classmethod
def get(cls):
""" Acquire a ConnectionWrapper instance that represents a connection
to the SQL server per nupic.cluster.database.* configuration settings.
NOTE: caller is responsible for calling the ConnectionWrapper instance's
release() method after using the connection in order to release resources.
Better yet, use the returned ConnectionWrapper instance in a Context Manager
statement for automatic invocation of release():
Example:
# If using Jython 2.5.x, first import with_statement at the very top of
your script (don't need this import for Jython/Python 2.6.x and later):
from __future__ import with_statement
# Then:
from nupic.database.Connection import ConnectionFactory
# Then use it like this
with ConnectionFactory.get() as conn:
conn.cursor.execute("SELECT ...")
conn.cursor.fetchall()
conn.cursor.execute("INSERT ...")
WARNING: DO NOT close the underlying connection or cursor as it may be
shared by other modules in your process. ConnectionWrapper's release()
method will do the right thing.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller is responsible
for releasing resources as described above.
"""
if cls._connectionPolicy is None:
logger = _getLogger(cls)
logger.info("Creating db connection policy via provider %r",
cls._connectionPolicyInstanceProvider)
cls._connectionPolicy = cls._connectionPolicyInstanceProvider()
logger.debug("Created connection policy: %r", cls._connectionPolicy)
return cls._connectionPolicy.acquireConnection()
@classmethod
def close(cls):
""" Close ConnectionFactory's connection policy. Typically, there is no need
to call this method as the system will automatically close the connections
when the process exits.
NOTE: This method should be used with CAUTION. It is designed to be
called ONLY by the code responsible for startup and shutdown of the process
since it closes the connection(s) used by ALL clients in this process.
"""
if cls._connectionPolicy is not None:
cls._connectionPolicy.close()
cls._connectionPolicy = None
return
@classmethod
def setConnectionPolicyProvider(cls, provider):
""" Set the method for ConnectionFactory to use when it needs to
instantiate its database connection policy.
NOTE: This method should be used with CAUTION. ConnectionFactory's default
behavior should be adequate for all NuPIC code, and this method is provided
primarily for diagnostics. It is designed to only be called by the code
responsible for startup of the process since the provider method has no
impact after ConnectionFactory's connection policy instance is instantiated.
See ConnectionFactory._createDefaultPolicy
Parameters:
----------------------------------------------------------------
provider: The method that instantiates the singleton database
connection policy to be used by ConnectionFactory class.
The method must be compatible with the following signature:
<DatabaseConnectionPolicyIface subclass instance> provider()
"""
cls._connectionPolicyInstanceProvider = provider
return
@classmethod
def _createDefaultPolicy(cls):
""" [private] Create the default database connection policy instance
Parameters:
----------------------------------------------------------------
retval: The default database connection policy instance
"""
logger = _getLogger(cls)
logger.debug(
"Creating database connection policy: platform=%r; pymysql.VERSION=%r",
platform.system(), pymysql.VERSION)
if platform.system() == "Java":
# NOTE: PooledDB doesn't seem to work under Jython
# NOTE: not appropriate for multi-threaded applications.
# TODO: this was fixed in Webware DBUtils r8228, so once
# we pick up a realease with this fix, we should use
# PooledConnectionPolicy for both Jython and Python.
policy = SingleSharedConnectionPolicy()
else:
policy = PooledConnectionPolicy()
return policy
_connectionPolicy = None
""" Our singleton database connection policy instance """
_connectionPolicyInstanceProvider = _createDefaultPolicy
""" This class variable holds the method that DatabaseConnectionPolicy uses
to create the singleton database connection policy instance
"""
# <-- End of class ConnectionFactory
class ConnectionWrapper(object):
""" An instance of this class is returned by
acquireConnection() methods of our database connection policy classes.
"""
_clsNumOutstanding = 0
""" For tracking the count of outstanding instances """
_clsOutstandingInstances = set()
""" tracks outstanding instances of this class while g_max_concurrency is
enabled
"""
def __init__(self, dbConn, cursor, releaser, logger):
"""
Parameters:
----------------------------------------------------------------
dbConn: the underlying database connection instance
cursor: database cursor
releaser: a method to call to release the connection and cursor;
method signature:
None dbConnReleaser(dbConn, cursor)
"""
global g_max_concurrency
try:
self._logger = logger
self.dbConn = dbConn
""" database connection instance """
self.cursor = cursor
""" Public cursor instance. Don't close it directly: Connection.release()
will do the right thing.
"""
self._releaser = releaser
self._addedToInstanceSet = False
""" True if we added self to _clsOutstandingInstances """
self._creationTracebackString = None
""" Instance creation traceback string (if g_max_concurrency is enabled) """
if g_max_concurrency is not None:
# NOTE: must be called *before* _clsNumOutstanding is incremented
self._trackInstanceAndCheckForConcurrencyViolation()
logger.debug("Acquired: %r; numOutstanding=%s",
self, self._clsNumOutstanding)
except:
logger.exception("Exception while instantiating %r;", self)
# Clean up and re-raise
if self._addedToInstanceSet:
self._clsOutstandingInstances.remove(self)
releaser(dbConn=dbConn, cursor=cursor)
raise
else:
self.__class__._clsNumOutstanding += 1
return
def __repr__(self):
return "%s<dbConn=%r, dbConnImpl=%r, cursor=%r, creationTraceback=%r>" % (
self.__class__.__name__, self.dbConn,
getattr(self.dbConn, "_con", "unknown"),
self.cursor, self._creationTracebackString,)
def __enter__(self):
""" [Context Manager protocol method] Permit a ConnectionWrapper instance
to be used in a context manager expression (with ... as:) to facilitate
robust release of resources (instead of try:/finally:/release()). See
examples in ConnectionFactory docstring.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
""" [Context Manager protocol method] Release resources. """
self.release()
# Return False to allow propagation of exception, if any
return False
def release(self):
""" Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources
"""
self._logger.debug("Releasing: %r", self)
# Discard self from set of outstanding instances
if self._addedToInstanceSet:
try:
self._clsOutstandingInstances.remove(self)
except:
self._logger.exception(
"Failed to remove self from _clsOutstandingInstances: %r;", self)
raise
self._releaser(dbConn=self.dbConn, cursor=self.cursor)
self.__class__._clsNumOutstanding -= 1
assert self._clsNumOutstanding >= 0, \
"_clsNumOutstanding=%r" % (self._clsNumOutstanding,)
self._releaser = None
self.cursor = None
self.dbConn = None
self._creationTracebackString = None
self._addedToInstanceSet = False
self._logger = None
return
def _trackInstanceAndCheckForConcurrencyViolation(self):
""" Check for concurrency violation and add self to
_clsOutstandingInstances.
ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is
incremented
"""
global g_max_concurrency, g_max_concurrency_raise_exception
assert g_max_concurrency is not None
assert self not in self._clsOutstandingInstances, repr(self)
# Populate diagnostic info
self._creationTracebackString = traceback.format_stack()
# Check for concurrency violation
if self._clsNumOutstanding >= g_max_concurrency:
# NOTE: It's possible for _clsNumOutstanding to be greater than
# len(_clsOutstandingInstances) if concurrency check was enabled after
# unrelease allocations.
errorMsg = ("With numOutstanding=%r, exceeded concurrency limit=%r "
"when requesting %r. OTHER TRACKED UNRELEASED "
"INSTANCES (%s): %r") % (
self._clsNumOutstanding, g_max_concurrency, self,
len(self._clsOutstandingInstances), self._clsOutstandingInstances,)
self._logger.error(errorMsg)
if g_max_concurrency_raise_exception:
raise ConcurrencyExceededError(errorMsg)
# Add self to tracked instance set
self._clsOutstandingInstances.add(self)
self._addedToInstanceSet = True
return
class DatabaseConnectionPolicyIface(object):
""" Database connection policy base class/interface.
NOTE: We can't use the abc (abstract base class) module because
Jython 2.5.x does not support abc
"""
def close(self):
""" Close the policy instance and its shared database connection. """
raise NotImplementedError()
def acquireConnection(self):
""" Get a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance.
Caller is responsible for calling the ConnectionWrapper
instance's release() method to release resources.
"""
raise NotImplementedError()
class SingleSharedConnectionPolicy(DatabaseConnectionPolicyIface):
""" This connection policy maintains a single shared database connection.
NOTE: this type of connection policy is not appropriate for muti-threaded
applications."""
def __init__(self):
""" Consruct an instance. The instance's open() method must be
called to make it ready for acquireConnection() calls.
"""
self._logger = _getLogger(self.__class__)
self._conn = SteadyDB.connect(** _getCommonSteadyDBArgsDict())
self._logger.debug("Created %s", self.__class__.__name__)
return
def close(self):
""" Close the policy instance and its shared database connection. """
self._logger.info("Closing")
if self._conn is not None:
self._conn.close()
self._conn = None
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return
def acquireConnection(self):
""" Get a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
# Check connection and attempt to re-establish it if it died (this is
# what PooledDB does)
self._conn._ping_check()
connWrap = ConnectionWrapper(dbConn=self._conn,
cursor=self._conn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap
def _releaseConnection(self, dbConn, cursor):
""" Release database connection and cursor; passed as a callback to
ConnectionWrapper
"""
self._logger.debug("Releasing connection")
# Close the cursor
cursor.close()
# NOTE: we don't release the connection, since this connection policy is
# sharing a single connection instance
return
class PooledConnectionPolicy(DatabaseConnectionPolicyIface):
"""This connection policy maintains a pool of connections that are doled out
as needed for each transaction. NOTE: Appropriate for multi-threaded
applications. NOTE: The connections are NOT shared concurrently between
threads.
"""
def __init__(self):
""" Consruct an instance. The instance's open() method must be
called to make it ready for acquireConnection() calls.
"""
self._logger = _getLogger(self.__class__)
self._logger.debug("Opening")
self._pool = PooledDB(**_getCommonSteadyDBArgsDict())
self._logger.info("Created %s", self.__class__.__name__)
return
def close(self):
""" Close the policy instance and its database connection pool. """
self._logger.info("Closing")
if self._pool is not None:
self._pool.close()
self._pool = None
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return
def acquireConnection(self):
""" Get a connection from the pool.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
dbConn = self._pool.connection(shareable=False)
connWrap = ConnectionWrapper(dbConn=dbConn,
cursor=dbConn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap
def _releaseConnection(self, dbConn, cursor):
""" Release database connection and cursor; passed as a callback to
ConnectionWrapper
"""
self._logger.debug("Releasing connection")
# Close the cursor
cursor.close()
# ... then return db connection back to the pool
dbConn.close()
return
class PerTransactionConnectionPolicy(DatabaseConnectionPolicyIface):
"""This connection policy establishes/breaks a new connection for every
high-level transaction (i.e., API call).
NOTE: this policy is intended for debugging, as it is generally not performant
to establish and tear down db connections for every API call.
"""
def __init__(self):
""" Consruct an instance. The instance's open() method must be
called to make it ready for acquireConnection() calls.
"""
self._logger = _getLogger(self.__class__)
self._opened = True
self._logger.info("Created %s", self.__class__.__name__)
return
def close(self):
""" Close the policy instance. """
self._logger.info("Closing")
if self._opened:
self._opened = False
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return
def acquireConnection(self):
""" Create a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
dbConn = SteadyDB.connect(** _getCommonSteadyDBArgsDict())
connWrap = ConnectionWrapper(dbConn=dbConn,
cursor=dbConn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap
def _releaseConnection(self, dbConn, cursor):
""" Release database connection and cursor; passed as a callback to
ConnectionWrapper
"""
self._logger.debug("Releasing connection")
# Close the cursor
cursor.close()
# ... then close the database connection
dbConn.close()
return
def _getCommonSteadyDBArgsDict():
""" Returns a dictionary of arguments for DBUtils.SteadyDB.SteadyDBConnection
constructor.
"""
return dict(
creator = pymysql,
host = Configuration.get('nupic.cluster.database.host'),
port = int(Configuration.get('nupic.cluster.database.port')),
user = Configuration.get('nupic.cluster.database.user'),
passwd = Configuration.get('nupic.cluster.database.passwd'),
charset = 'utf8',
use_unicode = True,
setsession = ['SET AUTOCOMMIT = 1'])
def _getLogger(cls, logLevel=None):
""" Gets a logger for the given class in this module
"""
logger = logging.getLogger(
".".join(['com.numenta', _MODULE_NAME, cls.__name__]))
if logLevel is not None:
logger.setLevel(logLevel)
return logger
| 22,041 | Python | .py | 494 | 38.163968 | 82 | 0.680959 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,038 | complete-algo-example.py | numenta_nupic-legacy/docs/examples/algo/complete-algo-example.py | import csv
import datetime
import numpy
import os
import yaml
from nupic.algorithms.sdr_classifier_factory import SDRClassifierFactory
from nupic.algorithms.spatial_pooler import SpatialPooler
from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.encoders.date import DateEncoder
from nupic.encoders.random_distributed_scalar import \
RandomDistributedScalarEncoder
_NUM_RECORDS = 3000
_EXAMPLE_DIR = os.path.dirname(os.path.abspath(__file__))
_INPUT_FILE_PATH = os.path.join(_EXAMPLE_DIR, os.pardir, "data", "gymdata.csv")
_PARAMS_PATH = os.path.join(_EXAMPLE_DIR, os.pardir, "params", "model.yaml")
def runHotgym(numRecords):
with open(_PARAMS_PATH, "r") as f:
modelParams = yaml.safe_load(f)["modelParams"]
enParams = modelParams["sensorParams"]["encoders"]
spParams = modelParams["spParams"]
tmParams = modelParams["tmParams"]
timeOfDayEncoder = DateEncoder(
timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
weekendEncoder = DateEncoder(
weekend=enParams["timestamp_weekend"]["weekend"])
scalarEncoder = RandomDistributedScalarEncoder(
enParams["consumption"]["resolution"])
encodingWidth = (timeOfDayEncoder.getWidth()
+ weekendEncoder.getWidth()
+ scalarEncoder.getWidth())
sp = SpatialPooler(
inputDimensions=(encodingWidth,),
columnDimensions=(spParams["columnCount"],),
potentialPct=spParams["potentialPct"],
potentialRadius=encodingWidth,
globalInhibition=spParams["globalInhibition"],
localAreaDensity=spParams["localAreaDensity"],
numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
synPermInactiveDec=spParams["synPermInactiveDec"],
synPermActiveInc=spParams["synPermActiveInc"],
synPermConnected=spParams["synPermConnected"],
boostStrength=spParams["boostStrength"],
seed=spParams["seed"],
wrapAround=True
)
tm = TemporalMemory(
columnDimensions=(tmParams["columnCount"],),
cellsPerColumn=tmParams["cellsPerColumn"],
activationThreshold=tmParams["activationThreshold"],
initialPermanence=tmParams["initialPerm"],
connectedPermanence=spParams["synPermConnected"],
minThreshold=tmParams["minThreshold"],
maxNewSynapseCount=tmParams["newSynapseCount"],
permanenceIncrement=tmParams["permanenceInc"],
permanenceDecrement=tmParams["permanenceDec"],
predictedSegmentDecrement=0.0,
maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
seed=tmParams["seed"]
)
classifier = SDRClassifierFactory.create()
results = []
with open(_INPUT_FILE_PATH, "r") as fin:
reader = csv.reader(fin)
headers = reader.next()
reader.next()
reader.next()
for count, record in enumerate(reader):
if count >= numRecords: break
# Convert data string into Python date object.
dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
# Convert data value string into float.
consumption = float(record[1])
# To encode, we need to provide zero-filled numpy arrays for the encoders
# to populate.
timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
weekendBits = numpy.zeros(weekendEncoder.getWidth())
consumptionBits = numpy.zeros(scalarEncoder.getWidth())
# Now we call the encoders to create bit representations for each value.
timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
weekendEncoder.encodeIntoArray(dateString, weekendBits)
scalarEncoder.encodeIntoArray(consumption, consumptionBits)
# Concatenate all these encodings into one large encoding for Spatial
# Pooling.
encoding = numpy.concatenate(
[timeOfDayBits, weekendBits, consumptionBits]
)
# Create an array to represent active columns, all initially zero. This
# will be populated by the compute method below. It must have the same
# dimensions as the Spatial Pooler.
activeColumns = numpy.zeros(spParams["columnCount"])
# Execute Spatial Pooling algorithm over input space.
sp.compute(encoding, True, activeColumns)
activeColumnIndices = numpy.nonzero(activeColumns)[0]
# Execute Temporal Memory algorithm over active mini-columns.
tm.compute(activeColumnIndices, learn=True)
activeCells = tm.getActiveCells()
# Get the bucket info for this input value for classification.
bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]
# Run classifier to translate active cells back to scalar value.
classifierResult = classifier.compute(
recordNum=count,
patternNZ=activeCells,
classification={
"bucketIdx": bucketIdx,
"actValue": consumption
},
learn=True,
infer=True
)
# Print the best prediction for 1 step out.
oneStepConfidence, oneStep = sorted(
zip(classifierResult[1], classifierResult["actualValues"]),
reverse=True
)[0]
print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
results.append([oneStep, oneStepConfidence * 100, None, None])
return results
if __name__ == "__main__":
runHotgym(_NUM_RECORDS)
| 5,265 | Python | .py | 120 | 38.141667 | 79 | 0.733346 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,039 | classifier-compute.py | numenta_nupic-legacy/docs/examples/algo/classifier-compute.py | # Get the bucket info for this input value for classification.
bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]
# Run classifier to translate active cells back to scalar value.
classifierResult = classifier.compute(
recordNum=count,
patternNZ=activeCells,
classification={
"bucketIdx": bucketIdx,
"actValue": consumption
},
learn=True,
infer=True
)
# Print the best prediction for 1 step out.
probability, value = sorted(
zip(classifierResult[1], classifierResult["actualValues"]),
reverse=True
)[0]
print("1-step: {:16} ({:4.4}%)".format(value, probability * 100))
| 602 | Python | .py | 19 | 29.315789 | 65 | 0.760757 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,040 | sp-compute.py | numenta_nupic-legacy/docs/examples/algo/sp-compute.py | # Create an array to represent active columns, all initially zero. This
# will be populated by the compute method below. It must have the same
# dimensions as the Spatial Pooler.
activeColumns = numpy.zeros(2048)
# Execute Spatial Pooling algorithm over input space.
sp.compute(encoding, True, activeColumns)
activeColumnIndices = numpy.nonzero(activeColumns)[0]
print activeColumnIndices
| 391 | Python | .py | 8 | 47.625 | 71 | 0.824147 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,041 | create-tm.py | numenta_nupic-legacy/docs/examples/algo/create-tm.py | from nupic.algorithms.temporal_memory import TemporalMemory
tm = TemporalMemory(
# Must be the same dimensions as the SP
columnDimensions=(2048, ),
# How many cells in each mini-column.
cellsPerColumn=32,
# A segment is active if it has >= activationThreshold connected synapses
# that are active due to infActiveState
activationThreshold=16,
initialPermanence=0.21,
connectedPermanence=0.5,
# Minimum number of active synapses for a segment to be considered during
# search for the best-matching segments.
minThreshold=12,
# The max number of synapses added to a segment during learning
maxNewSynapseCount=20,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
predictedSegmentDecrement=0.0,
maxSegmentsPerCell=128,
maxSynapsesPerSegment=32,
seed=1960
)
| 795 | Python | .py | 23 | 31.782609 | 75 | 0.796368 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,042 | tm-compute.py | numenta_nupic-legacy/docs/examples/algo/tm-compute.py | # Execute Temporal Memory algorithm over active mini-columns.
tm.compute(activeColumnIndices, learn=True)
activeCells = tm.getActiveCells()
print activeCells
| 158 | Python | .py | 4 | 38.5 | 61 | 0.850649 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,043 | create-encoders.py | numenta_nupic-legacy/docs/examples/algo/create-encoders.py | from nupic.encoders.date import DateEncoder
from nupic.encoders.random_distributed_scalar import \
RandomDistributedScalarEncoder
timeOfDayEncoder = DateEncoder(timeOfDay=(21,1))
weekendEncoder = DateEncoder(weekend=21)
scalarEncoder = RandomDistributedScalarEncoder(0.88)
| 278 | Python | .py | 6 | 44.5 | 54 | 0.859779 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,044 | encode-data.py | numenta_nupic-legacy/docs/examples/algo/encode-data.py | with open (_INPUT_FILE_PATH) as fin:
reader = csv.reader(fin)
for count, record in enumerate(reader):
# Convert data string into Python date object.
dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
# Convert data value string into float.
consumption = float(record[1])
# To encode, we need to provide zero-filled numpy arrays for the encoders
# to populate.
timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
weekendBits = numpy.zeros(weekendEncoder.getWidth())
consumptionBits = numpy.zeros(scalarEncoder.getWidth())
# Now we call the encoders create bit representations for each value.
timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
weekendEncoder.encodeIntoArray(dateString, weekendBits)
scalarEncoder.encodeIntoArray(consumption, consumptionBits)
# Concatenate all these encodings into one large encoding for Spatial
# Pooling.
encoding = numpy.concatenate(
[timeOfDayBits, weekendBits, consumptionBits]
)
# Print complete encoding to the console as a binary representation.
print encoding.astype('int16')
| 1,139 | Python | .py | 23 | 44.608696 | 77 | 0.753597 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,045 | create-sp.py | numenta_nupic-legacy/docs/examples/algo/create-sp.py | from nupic.algorithms.spatial_pooler import SpatialPooler
encodingWidth = timeOfDayEncoder.getWidth() \
+ weekendEncoder.getWidth() \
+ scalarEncoder.getWidth()
sp = SpatialPooler(
# How large the input encoding will be.
inputDimensions=(encodingWidth),
# How many mini-columns will be in the Spatial Pooler.
columnDimensions=(2048),
# What percent of the columns's receptive field is available for potential
# synapses?
potentialPct=0.85,
# This means that the input space has no topology.
globalInhibition=True,
localAreaDensity=-1.0,
# Roughly 2%, giving that there is only one inhibition area because we have
# turned on globalInhibition (40 / 2048 = 0.0195)
numActiveColumnsPerInhArea=40.0,
# How quickly synapses grow and degrade.
synPermInactiveDec=0.005,
synPermActiveInc=0.04,
synPermConnected=0.1,
# boostStrength controls the strength of boosting. Boosting encourages
# efficient usage of SP columns.
boostStrength=3.0,
# Random number generator seed.
seed=1956,
# Determines if inputs at the beginning and end of an input dimension should
# be considered neighbors when mapping columns to inputs.
wrapAround=False
)
| 1,184 | Python | .py | 31 | 35.387097 | 78 | 0.780191 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,046 | example-create-network.py | numenta_nupic-legacy/docs/examples/network/example-create-network.py | from nupic.engine import Network
# A network that will hold the regions.
network = Network() | 93 | Python | .py | 3 | 30 | 39 | 0.8 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,047 | complete-network-example.py | numenta_nupic-legacy/docs/examples/network/complete-network-example.py | import json
import os
import yaml
from nupic.engine import Network
from nupic.encoders import MultiEncoder
from nupic.data.file_record_stream import FileRecordStream
_NUM_RECORDS = 3000
_EXAMPLE_DIR = os.path.dirname(os.path.abspath(__file__))
_INPUT_FILE_PATH = os.path.join(_EXAMPLE_DIR, os.pardir, "data", "gymdata.csv")
_PARAMS_PATH = os.path.join(_EXAMPLE_DIR, os.pardir, "params", "model.yaml")
def createDataOutLink(network, sensorRegionName, regionName):
"""Link sensor region to other region so that it can pass it data."""
network.link(sensorRegionName, regionName, "UniformLink", "",
srcOutput="dataOut", destInput="bottomUpIn")
def createFeedForwardLink(network, regionName1, regionName2):
"""Create a feed-forward link between 2 regions: regionName1 -> regionName2"""
network.link(regionName1, regionName2, "UniformLink", "",
srcOutput="bottomUpOut", destInput="bottomUpIn")
def createResetLink(network, sensorRegionName, regionName):
"""Create a reset link from a sensor region: sensorRegionName -> regionName"""
network.link(sensorRegionName, regionName, "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
def createSensorToClassifierLinks(network, sensorRegionName,
classifierRegionName):
"""Create required links from a sensor region to a classifier region."""
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="bucketIdxOut", destInput="bucketIdxIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="actValueOut", destInput="actValueIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="categoryOut", destInput="categoryIn")
def createEncoder(encoderParams):
"""Create a multi-encoder from params."""
encoder = MultiEncoder()
encoder.addMultipleEncoders(encoderParams)
return encoder
def createNetwork(dataSource):
"""Create and initialize a network."""
with open(_PARAMS_PATH, "r") as f:
modelParams = yaml.safe_load(f)["modelParams"]
# Create a network that will hold the regions.
network = Network()
# Add a sensor region.
network.addRegion("sensor", "py.RecordSensor", '{}')
# Set the encoder and data source of the sensor region.
sensorRegion = network.regions["sensor"].getSelf()
sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
sensorRegion.dataSource = dataSource
# Make sure the SP input width matches the sensor region output width.
modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()
# Add SP and TM regions.
network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"]))
network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"]))
# Add a classifier region.
clName = "py.%s" % modelParams["clParams"].pop("regionName")
network.addRegion("classifier", clName, json.dumps(modelParams["clParams"]))
# Add all links
createSensorToClassifierLinks(network, "sensor", "classifier")
createDataOutLink(network, "sensor", "SP")
createFeedForwardLink(network, "SP", "TM")
createFeedForwardLink(network, "TM", "classifier")
# Reset links are optional, since the sensor region does not send resets.
createResetLink(network, "sensor", "SP")
createResetLink(network, "sensor", "TM")
# Make sure all objects are initialized.
network.initialize()
return network
def getPredictionResults(network, clRegionName):
"""Get prediction results for all prediction steps."""
classifierRegion = network.regions[clRegionName]
actualValues = classifierRegion.getOutputData("actualValues")
probabilities = classifierRegion.getOutputData("probabilities")
steps = classifierRegion.getSelf().stepsList
N = classifierRegion.getSelf().maxCategoryCount
results = {step: {} for step in steps}
for i in range(len(steps)):
# stepProbabilities are probabilities for this prediction step only.
stepProbabilities = probabilities[i * N:(i + 1) * N - 1]
mostLikelyCategoryIdx = stepProbabilities.argmax()
predictedValue = actualValues[mostLikelyCategoryIdx]
predictionConfidence = stepProbabilities[mostLikelyCategoryIdx]
results[steps[i]]["predictedValue"] = predictedValue
results[steps[i]]["predictionConfidence"] = predictionConfidence
return results
def runHotgym(numRecords):
"""Run the Hot Gym example."""
# Create a data source for the network.
dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
numRecords = min(numRecords, dataSource.getDataRowCount())
network = createNetwork(dataSource)
# Set predicted field
network.regions["sensor"].setParameter("predictedField", "consumption")
# Enable learning for all regions.
network.regions["SP"].setParameter("learningMode", 1)
network.regions["TM"].setParameter("learningMode", 1)
network.regions["classifier"].setParameter("learningMode", 1)
# Enable inference for all regions.
network.regions["SP"].setParameter("inferenceMode", 1)
network.regions["TM"].setParameter("inferenceMode", 1)
network.regions["classifier"].setParameter("inferenceMode", 1)
results = []
N = 1 # Run the network, N iterations at a time.
for iteration in range(0, numRecords, N):
network.run(N)
predictionResults = getPredictionResults(network, "classifier")
oneStep = predictionResults[1]["predictedValue"]
oneStepConfidence = predictionResults[1]["predictionConfidence"]
fiveStep = predictionResults[5]["predictedValue"]
fiveStepConfidence = predictionResults[5]["predictionConfidence"]
result = (oneStep, oneStepConfidence * 100,
fiveStep, fiveStepConfidence * 100)
print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
results.append(result)
return results
if __name__ == "__main__":
runHotgym(_NUM_RECORDS)
| 5,932 | Python | .py | 116 | 46.801724 | 80 | 0.744939 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,048 | example-link-all.py | numenta_nupic-legacy/docs/examples/network/example-link-all.py | def createDataOutLink(network, sensorRegionName, regionName):
"""Link sensor region to other region so that it can pass it data."""
network.link(sensorRegionName, regionName, "UniformLink", "",
srcOutput="dataOut", destInput="bottomUpIn")
def createFeedForwardLink(network, regionName1, regionName2):
"""Create a feed-forward link between 2 regions: regionName1 -> regionName2"""
network.link(regionName1, regionName2, "UniformLink", "",
srcOutput="bottomUpOut", destInput="bottomUpIn")
def createSensorToClassifierLinks(network, sensorRegionName, classifierRegionName):
"""Create required links from a sensor region to a classifier region."""
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="bucketIdxOut", destInput="bucketIdxIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="actValueOut", destInput="actValueIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="categoryOut", destInput="categoryIn")
# 1. Add data link between sensor and SP.
createDataOutLink(network, "sensor", "SP")
# 2. Add feed forward links.
createFeedForwardLink(network, "SP", "TM")
createFeedForwardLink(network, "TM", "classifier")
# 3. Add links between sensor and classifier.
createSensorToClassifierLinks(network, "sensor", "classifier")
| 1,426 | Python | .py | 23 | 56.652174 | 83 | 0.749641 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,049 | example-create-encoder.py | numenta_nupic-legacy/docs/examples/network/example-create-encoder.py | from nupic.encoders import MultiEncoder
def createEncoder(encoderParams):
encoder = MultiEncoder()
encoder.addMultipleEncoders(encoderParams)
return encoder
# Use the same modelParams extracted from the YAML file earlier.
encoderParams = modelParams["sensorParams"]["encoders"]
# Add encoder to the sensor region.
sensorRegion = network.regions["sensor"].getSelf()
sensorRegion.encoder = createEncoder(encoderParams) | 425 | Python | .py | 10 | 40.7 | 64 | 0.828087 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,050 | example-extract-results.py | numenta_nupic-legacy/docs/examples/network/example-extract-results.py | def getPredictionResults(network, clRegionName):
"""Helper function to extract results for all prediction steps."""
classifierRegion = network.regions[clRegionName]
actualValues = classifierRegion.getOutputData("actualValues")
probabilities = classifierRegion.getOutputData("probabilities")
steps = classifierRegion.getSelf().stepsList
N = classifierRegion.getSelf().maxCategoryCount
results = {step: {} for step in steps}
for i in range(len(steps)):
# stepProbabilities are probabilities for this prediction step only.
stepProbabilities = probabilities[i * N:(i + 1) * N - 1]
mostLikelyCategoryIdx = stepProbabilities.argmax()
predictedValue = actualValues[mostLikelyCategoryIdx]
predictionConfidence = stepProbabilities[mostLikelyCategoryIdx]
results[steps[i]]["predictedValue"] = predictedValue
results[steps[i]]["predictionConfidence"] = predictionConfidence
return results | 926 | Python | .py | 17 | 50.647059 | 72 | 0.787211 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,051 | example-run-network.py | numenta_nupic-legacy/docs/examples/network/example-run-network.py | # Make sure all objects are initialized.
network.initialize()
N = 1 # Run the network, N iterations at a time.
for iteration in range(0, numRecords, N):
network.run(N)
| 172 | Python | .py | 5 | 32.8 | 49 | 0.746988 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,052 | example-enable-learning-and-inference.py | numenta_nupic-legacy/docs/examples/network/example-enable-learning-and-inference.py | # Enable learning for all regions.
network.regions["SP"].setParameter("learningMode", 1)
network.regions["TM"].setParameter("learningMode", 1)
network.regions["classifier"].setParameter("learningMode", 1)
# Enable inference for all regions.
network.regions["SP"].setParameter("inferenceMode", 1)
network.regions["TM"].setParameter("inferenceMode", 1)
network.regions["classifier"].setParameter("inferenceMode", 1)
| 415 | Python | .py | 8 | 50.75 | 62 | 0.788177 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,053 | example-create-regions.py | numenta_nupic-legacy/docs/examples/network/example-create-regions.py | import json
# Add a sensor region, set its encoder and data source.
network.addRegion("sensor", "py.RecordSensor", json.dumps({"verbosity": 0}))
# Make sure the SP input width matches the sensor region output width.
modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()
# Add the SP and TM regions.
network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"]))
network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"]))
# Add the classifier region.
clName = "py.%s" % modelParams[]
network.addRegion("classifier", , json.dumps(modelParams["clParams"]))
# Add all links
createSensorToClassifierLinks(network, "sensor", "classifier")
# Link the sensor region to the SP region so that it can pass it data.
createDataOutLink(network, "sensor", "SP")
# Create feed-forward links between regions.
createFeedForwardLink(network, "SP", "TM")
createFeedForwardLink(network, "TM", "classifier")
# Propagate reset signals to SP and TM regions.
# Optional if you know that your sensor regions does not send resets.
createResetLink(network, "sensor", "SP")
createResetLink(network, "sensor", "TM")
# Set the data source to the sensor region
sensorRegion = network.regions["sensor"].getSelf()
sensorRegion.dataSource = dataSource
# Set the encoder to the sensor region
sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
# Make sure all objects are initialized.
network.initialize()
| 1,464 | Python | .py | 29 | 48.793103 | 77 | 0.778092 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,054 | example-data-source.py | numenta_nupic-legacy/docs/examples/network/example-data-source.py | from nupic.data.file_record_stream import FileRecordStream
_INPUT_FILE_PATH = "/path/to/your/data.csv"
dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
# Add the data source to the sensor region.
sensorRegion = network.regions["sensor"].getSelf()
sensorRegion.dataSource = dataSource | 293 | Python | .py | 6 | 47.666667 | 58 | 0.811189 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,055 | example-add-sp.py | numenta_nupic-legacy/docs/examples/network/example-add-sp.py | spParams = modelParams["spParams"]
# Make sure the SP input width matches the sensor output width.
spParams["inputWidth"] = sensorRegion.encoder.getWidth()
# Add SP region.
network.addRegion("SP", "py.SPRegion", json.dumps(spParams)) | 235 | Python | .py | 5 | 45.8 | 63 | 0.777293 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,056 | example-yaml-import.py | numenta_nupic-legacy/docs/examples/network/example-yaml-import.py | import yaml
_PARAMS_PATH = "/path/to/model.yaml"
with open(_PARAMS_PATH, "r") as f:
modelParams = yaml.safe_load(f)["modelParams"] | 134 | Python | .py | 4 | 31.75 | 48 | 0.713178 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,057 | complete-opf-example.py | numenta_nupic-legacy/docs/examples/opf/complete-opf-example.py | import csv
import datetime
import os
import yaml
from itertools import islice
from nupic.frameworks.opf.model_factory import ModelFactory
_NUM_RECORDS = 3000
_EXAMPLE_DIR = os.path.dirname(os.path.abspath(__file__))
_INPUT_FILE_PATH = os.path.join(_EXAMPLE_DIR, os.pardir, "data", "gymdata.csv")
_PARAMS_PATH = os.path.join(_EXAMPLE_DIR, os.pardir, "params", "model.yaml")
def createModel():
with open(_PARAMS_PATH, "r") as f:
modelParams = yaml.safe_load(f)
return ModelFactory.create(modelParams)
def runHotgym(numRecords):
model = createModel()
model.enableInference({"predictedField": "consumption"})
with open(_INPUT_FILE_PATH) as fin:
reader = csv.reader(fin)
headers = reader.next()
reader.next()
reader.next()
results = []
for record in islice(reader, numRecords):
modelInput = dict(zip(headers, record))
modelInput["consumption"] = float(modelInput["consumption"])
modelInput["timestamp"] = datetime.datetime.strptime(
modelInput["timestamp"], "%m/%d/%y %H:%M")
result = model.run(modelInput)
bestPredictions = result.inferences["multiStepBestPredictions"]
allPredictions = result.inferences["multiStepPredictions"]
oneStep = bestPredictions[1]
oneStepConfidence = allPredictions[1][oneStep]
fiveStep = bestPredictions[5]
fiveStepConfidence = allPredictions[5][fiveStep]
result = (oneStep, oneStepConfidence * 100,
fiveStep, fiveStepConfidence * 100)
print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
results.append(result)
return results
if __name__ == "__main__":
runHotgym(_NUM_RECORDS)
| 1,681 | Python | .py | 42 | 35.261905 | 79 | 0.699447 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,058 | load-model-example.py | numenta_nupic-legacy/docs/examples/opf/load-model-example.py | import csv
import datetime
# Open the file to loop over each row
with open ("gymdata.csv") as fileIn:
reader = csv.reader(fileIn)
# The first three rows are not data, but we'll need the field names when
# passing data into the model.
headers = reader.next()
reader.next()
reader.next()
for record in reader:
# Create a dictionary with field names as keys, row values as values.
modelInput = dict(zip(headers, record))
# Convert string consumption to float value.
modelInput["consumption"] = float(modelInput["consumption"])
# Convert timestamp string to Python datetime.
modelInput["timestamp"] = datetime.datetime.strptime(
modelInput["timestamp"], "%m/%d/%y %H:%M"
)
# Push the data into the model and get back results.
result = model.run(modelInput)
| 811 | Python | .py | 21 | 34.857143 | 74 | 0.718274 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,059 | results-example.py | numenta_nupic-legacy/docs/examples/opf/results-example.py | result = model.run(modelInput)
bestPredictions = result.inferences['multiStepBestPredictions']
allPredictions = result.inferences['multiStepPredictions']
oneStep = bestPredictions[1]
fiveStep = bestPredictions[5]
# Confidence values are keyed by prediction value in multiStepPredictions.
oneStepConfidence = allPredictions[1][oneStep]
fiveStepConfidence = allPredictions[5][fiveStep]
result = (oneStep, oneStepConfidence * 100,
fiveStep, fiveStepConfidence * 100)
print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
| 549 | Python | .py | 11 | 47.909091 | 74 | 0.776536 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,060 | create-model-example.py | numenta_nupic-legacy/docs/examples/opf/create-model-example.py | import yaml
from nupic.frameworks.opf.model_factory import ModelFactory
_PARAMS_PATH = "/path/to/model.yaml"
with open(_PARAMS_PATH, "r") as f:
modelParams = yaml.safe_load(f)
model = ModelFactory.create(modelParams.MODEL_PARAMS)
# This tells the model the field to predict.
model.enableInference({'predictedField': 'consumption'})
| 338 | Python | .py | 8 | 40.5 | 59 | 0.788344 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,061 | conf.py | numenta_nupic-legacy/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# NuPIC documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 28 11:08:46 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import datetime
sourcePath = os.path.abspath(os.path.join('..', 'src'))
rootPath = os.path.abspath('../..')
versionFile = os.path.abspath(os.path.join(rootPath, 'VERSION'))
with open(versionFile, 'r') as f:
devVersion = f.read()
if devVersion.endswith('.dev0'):
nextVersion = devVersion.split('.dev0')[0]
else:
nextVersion = devVersion
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, sourcePath)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NuPIC'
copyright = u'2017, Numenta'
author = u'Numenta'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = nextVersion
# The full version, including alpha/beta/rc tags.
release = devVersion
buildDate = datetime.datetime.now().strftime(
"Documentation built on %B %d, %Y at %H:%M:%S"
)
# Ensures release version is included in output
rst_epilog_pre = """
.. |release| replace:: {}
.. |buildDate| replace:: {}
"""
rst_epilog = rst_epilog_pre.format(release, buildDate)
# Adds markdown support (pip install recommonmark)
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'numenta',
'github_repo': 'nupic',
'github_banner': True,
'font_family': 'Verdana',
'head_font_family': 'Verdana',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = 'numenta-logo.png'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html'
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NuPICdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NuPIC.tex', u'NuPIC Documentation',
u'Numenta', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nupic', u'NuPIC Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NuPIC', u'NuPIC Documentation',
author, 'NuPIC', 'One line description of project.',
'Miscellaneous'),
]
| 5,691 | Python | .py | 153 | 34.934641 | 79 | 0.694323 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,062 | install_python_pip.ps1 | numenta_nupic-legacy/ci/appveyor/install_python_pip.ps1 | # Sample script to install Python and pip under Windows
# Authors: Olivier Grisel and Kyle Kastner
# License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/
$BASE_URL = "https://www.python.org/ftp/python/"
$GET_PIP_URL = "http://releases.numenta.org/pip/1ebd3cb7a5a3073058d0c9552ab074bd/get-pip.py"
$GET_PIP_PATH = "C:\get-pip.py"
$GET_NUMPY_URL = "https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/numpy-1.9.1+openblas-cp27-none-win_amd64.whl"
$GET_NUMPY_PATH = "C:\numpy-1.9.1+openblas-cp27-none-win_amd64.whl"
function DownloadPython ($python_version, $platform_suffix) {
$webclient = New-Object System.Net.WebClient
$filename = "python-" + $python_version + $platform_suffix + ".msi"
$url = $BASE_URL + $python_version + "/" + $filename
$basedir = $pwd.Path + "\"
$filepath = $basedir + $filename
if (Test-Path $filename) {
Write-Host "Reusing" $filepath
return $filepath
}
# Download and retry up to 5 times in case of network transient errors.
Write-Host "Downloading" $filename "from" $url
$retry_attempts = 3
for($i=0; $i -lt $retry_attempts; $i++){
try {
$webclient.DownloadFile($url, $filepath)
break
}
Catch [Exception]{
Start-Sleep 1
}
}
Write-Host "File saved at" $filepath
return $filepath
}
function InstallPython ($python_version, $architecture, $python_home) {
Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home
if ( $(Try { Test-Path $python_home.trim() } Catch { $false }) ) {
Write-Host $python_home "already exists, skipping."
return $false
}
if ($architecture -eq "32") {
$platform_suffix = ""
} else {
$platform_suffix = ".amd64"
}
$filepath = DownloadPython $python_version $platform_suffix
Write-Host "Installing" $filepath "to" $python_home
$args = "/qn /i $filepath TARGETDIR=$python_home"
Write-Host "msiexec.exe" $args
Start-Process -FilePath "msiexec.exe" -ArgumentList $args -Wait -Passthru
Write-Host "Python $python_version ($architecture) installation complete"
return $true
}
function InstallPip ($python_home) {
$pip_path = $python_home + "\Scripts\pip.exe"
$python_path = $python_home + "\python.exe"
if ( $(Try { Test-Path $pip_path.trim() } Catch { $false }) ) {
Write-Host "pip already installed at " $pip_path
return $false
}
Write-Host "Installing pip..."
$webclient = New-Object System.Net.WebClient
$webclient.DownloadFile($GET_PIP_URL, $GET_PIP_PATH)
Write-Host "Executing:" $python_path $GET_PIP_PATH
Start-Process -FilePath "$python_path" -ArgumentList "$GET_PIP_PATH" -Wait -Passthru
return $true
}
function main () {
InstallPython $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHONHOME
InstallPip $env:PYTHONHOME
$python_path = $env:PYTHONHOME + "\python.exe"
$pip_path = $env:PYTHONHOME + "\Scripts\pip.exe"
Write-Host "python -m pip install --upgrade pip"
& $python_path -m pip install --upgrade pip
Write-Host "pip install " wheel
& $pip_path install wheel
Write-Host "pip install " numpy==1.9.1
#& $pip_path install -i https://pypi.numenta.com/pypi numpy==1.9.1
# Check AppVeyor cloud cache for NumPy wheel
if (-Not (Test-Path $GET_NUMPY_PATH)) {
$webclient = New-Object System.Net.WebClient
$webclient.DownloadFile($GET_NUMPY_URL, $GET_NUMPY_PATH)
}
& $pip_path install $GET_NUMPY_PATH
}
main
| 3,596 | Python | .py | 85 | 36.976471 | 123 | 0.668958 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,063 | deploy-wheel-to-s3.py | numenta_nupic-legacy/ci/travis/deploy-wheel-to-s3.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import boto
from boto.s3.key import Key
# This script assumes the following environment variables are set for boto:
# - AWS_ACCESS_KEY_ID
# - AWS_SECRET_ACCESS_KEY
REGION = "us-west-2"
BUCKET = "artifacts.numenta.org"
RELEASE_FOLDER = "numenta/nupic/releases"
def upload(artifactsBucket, wheelFileName, wheelPath):
key = Key(artifactsBucket)
key.key = "%s/%s" % (RELEASE_FOLDER, wheelFileName)
print "Uploading %s to %s/%s..." % (wheelFileName, BUCKET, RELEASE_FOLDER)
key.set_contents_from_filename(wheelPath)
def run(wheelPath):
  """
  Connect to S3, resolve the artifacts bucket and upload the given wheel.

  :param wheelPath: local filesystem path of the wheel to upload
  """
  connection = boto.connect_s3()
  upload(connection.get_bucket(BUCKET),
         os.path.basename(wheelPath),
         wheelPath)
if __name__ == "__main__":
  # Entry point: expects exactly one CLI argument, the wheel's local path.
  # Previously a missing argument surfaced as an opaque IndexError; emit a
  # usage message and a non-zero exit code instead.
  if len(sys.argv) != 2:
    sys.stderr.write("Usage: %s <path-to-wheel>\n" % sys.argv[0])
    sys.exit(1)
  wheelPath = sys.argv[1]
  run(wheelPath)
| 1,787 | Python | .py | 43 | 39.813953 | 76 | 0.704965 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,064 | run_pylint.sh | numenta_nupic-legacy/ci/travis/run_pylint.sh | #!/bin/bash
# Running total of pylint exit statuses; stays 0 only if every file passes.
exit_code=0;
echo "============= PYLINT ============="
# Lint every .py file that changed relative to the Travis target branch.
# NOTE(review): the unquoted $(...) word-splits, so file names containing
# spaces would break -- presumably acceptable since repo paths contain none.
for checkable in $(git diff --name-only ${TRAVIS_BRANCH} | grep py$)
do
  echo "===================>"
  echo "= running pylint on $checkable"
  echo "===================>"
  pylint --rcfile=${NUPIC}/pylintrc $checkable
  # Accumulate pylint's exit status; the echo output itself is discarded --
  # only the arithmetic side effect on exit_code matters.
  echo $((exit_code+=$?)) > /dev/null
done
if [ "$exit_code" -eq 0 ]; then
  echo "========== PYLINT PASSED ========="
else
  echo "========== PYLINT FAILED ========="
  # Propagate the accumulated failure count so CI marks the build failed.
  exit $exit_code
fi
| 495 | Python | .py | 17 | 26 | 68 | 0.495781 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,065 | serializable_test.py | numenta_nupic-legacy/tests/unit/nupic/serializable_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import importlib
import inspect
import os
import pkgutil
import tempfile
import unittest
import numpy
try:
import capnp
import serializable_test_capnp
except ImportError:
# Ignore for platforms in which capnp is not available, e.g. windows
capnp = None
import nupic
from nupic.frameworks.opf.common_models.cluster_params import (
getScalarMetricWithTimeOfDayAnomalyParams)
from nupic.serializable import Serializable
# Model parameters used to construct HTMPredictionModel in the round-trip
# test below; built once at import time from the canonical anomaly helper.
MODEL_PARAMS = getScalarMetricWithTimeOfDayAnomalyParams([0],
                                                         minVal=23.42,
                                                         maxVal=23.420001)
# Registry of every concrete Serializable subclass exercised by
# `testAllSubClasses`. Per entry:
#   "params"   -- kwargs required to instantiate the class
#   "skip"     -- present for classes excluded from the round-trip test
#   "volatile" -- attribute names ignored when comparing pre/post state
SERIALIZABLE_SUBCLASSES = {
  "MovingAverage": {
    "params": {"windowSize": 1}
  },
  "AnomalyLikelihood": {},
  "BacktrackingTM": {},
  "Connections": {"params": {"numCells": 1}},
  "TemporalMemory": {},
  "KNNClassifier": {},
  "SDRClassifier": {},
  "SpatialPooler": {
    "params": {"inputDimensions": (2, 2), "columnDimensions": (4, 4)}
  },
  "Encoder": {},
  "Model": {},
  "AnomalyLikelihoodRegion": {},
  "AnomalyRegion": {},
  "TestRegion": {
    "skip": True
  },
  "BacktrackingTMCPP": {},
  "TemporalMemoryShim": {},
  "MonitoredTemporalMemory": {
    "volatile": ["mmName", "_mmTransitionTracesStale", "_mmTraces", "_mmData",
                 "_mmResetActive"]
  },
  "TMShim": {},
  "MonitoredTMShim": {
    "volatile": ["mmName", "_mmTraces", "_mmData", "_mmResetActive"]
  },
  "ScalarEncoder": {
    "params": {"w": 21, "n": 1024, "minval": 0, "maxval": 100}
  },
  "RandomDistributedScalarEncoder": {
    "params": {"resolution": 1}
  },
  "DateEncoder": {},
  "LogEncoder": {
    "params": {"w": 21, "n": 100}
  },
  "CategoryEncoder": {
    "params": {"w": 21, "categoryList": ["a", "b", "c"]}
  },
  "SDRCategoryEncoder": {
    "params": {"n": 100, "w": 21}
  },
  "ScalarSpaceEncoder": {},
  "CoordinateEncoder": {},
  "PassThroughEncoder": {
    "params": {"n": 100, "w": 21}
  },
  "MultiEncoder": {},
  "AdaptiveScalarEncoder": {
    "params": {"w": 21, "n": 1024, "minval": 0, "maxval": 100}
  },
  "DeltaEncoder": {
    "params": {"w": 21, "n": 1024, "minval": 0, "maxval": 100}
  },
  "GeospatialCoordinateEncoder": {
    "params": {"scale": 1, "timestep": 1}
  },
  "SparsePassThroughEncoder": {
    "params": {"n": 100, "w": 21}
  },
  "HTMPredictionModel": {
    "params": MODEL_PARAMS['modelConfig']['modelParams']
  },
  "TwoGramModel": {
    "params": {"encoderParams": {"blah": {"fieldname": "blah", "maxval": 9,
                                          "minval": 0, "n": 10, "w": 1,
                                          "clipInput": True, "forced": True,
                                          "type": "ScalarEncoder"}}}
  },
  "PreviousValueModel": {}
}
def _allSubclasses(cls):
  """
  Recursively collect every direct and indirect subclass of `cls`.

  :param cls: The class whose subclass tree is traversed
  :return: list of all subclasses, direct children first, then each
           child's own subtree in order
  """
  collected = list(cls.__subclasses__())
  for child in cls.__subclasses__():
    collected.extend(_allSubclasses(child))
  return collected
def _getAttributes(obj):
  """
  Return a mapping of attribute name to value for `obj`.

  A plain dict is returned as-is (same object, no copy). Objects using
  __slots__ are converted to a new dict of their slot values. Objects
  using __dict__ expose their attribute dict directly; for those, any
  fields flagged "volatile" in SERIALIZABLE_SUBCLASSES are stripped first
  (note: this mutates the object's __dict__ in place).
  """
  if isinstance(obj, dict):
    attrs = obj
  elif hasattr(obj, "__slots__"):
    attrs = dict((slot, getattr(obj, slot)) for slot in obj.__slots__)
  elif hasattr(obj, "__dict__"):
    attrs = obj.__dict__
    # Drop volatile fields so field-value comparisons ignore transient state
    testParams = SERIALIZABLE_SUBCLASSES[obj.__class__.__name__]
    for fieldName in testParams.get("volatile", ()):
      attrs.pop(fieldName, None)
  return attrs
def _remove(fname):
  """
  Cleanup helper used via addCleanup to delete files created by a test.

  :param fname: Path of the file to delete; silently ignored when it does
                not exist as a regular file
  :return: None
  """
  if not os.path.isfile(fname):
    return
  os.remove(fname)
@unittest.skipUnless(capnp, "Capnp not available.")
class SerializableTest(unittest.TestCase):
  """Round-trip (write/read) serialization tests for Serializable classes."""
  # pylint: disable=R0201,W0223
  def customAssertArrayEquals(self, a1, a2, msg=None):
    """
    Function used by `addTypeEqualityFunc` comparing numpy arrays
    """
    numpy.testing.assert_equal(a1, a2, msg)
  def customAssertSequenceEquals(self, l1, l2, msg=None):
    """
    Function used by `addTypeEqualityFunc` comparing sequences
    """
    self.assertEquals(len(l1), len(l2), msg)
    for i in xrange(len(l1)):
      first = l1[i]
      second = l2[i]
      # Compare Serializable elements by attribute dict, not identity
      if type(first).__name__ in SERIALIZABLE_SUBCLASSES:
        first = _getAttributes(first)
        second = _getAttributes(second)
      self.assertEquals(first, second, msg)
  def customAssertDictEquals(self, d1, d2, msg=None):
    """
    Function used by `addTypeEqualityFunc` comparing dicts
    """
    self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
    self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
    self.assertEquals(len(d1), len(d2), msg + str(d1) + ' != ' + str(d2))
    for k, _ in d1.items():
      if k not in d2:
        raise AssertionError(repr(k))
      first = d1[k]
      second = d2[k]
      # Compare Serializable values by attribute dict, not identity
      if type(first).__name__ in SERIALIZABLE_SUBCLASSES:
        first = _getAttributes(first)
        second = _getAttributes(second)
      self.assertEquals(first, second, 'key=%r\n%s' % (k, msg))
  def testABCProtocolEnforced(self):
    # A Serializable subclass missing the abstract methods must not be
    # instantiable (abc raises TypeError).
    # pylint: disable=E0110
    class Foo(Serializable):
      pass  # read(), write(), getCapnpSchema() not implemented here
    with self.assertRaises(TypeError):
      Foo()
  def testReadFromAndWriteToFile(self):
    """ Test generic usage of serializable mixin class """
    class Bar(object):
      pass
    class Foo(Bar, Serializable):
      def __init__(self, bar):
        self.bar = bar
      @classmethod
      def getSchema(cls):
        return serializable_test_capnp.Foo
      @classmethod
      def read(cls, proto):
        # Bypass __init__ so deserialization does not require ctor args
        foo = object.__new__(cls)
        foo.bar = proto.bar
        return foo
      def write(self, proto):
        proto.bar = self.bar
    filename = tempfile.mktemp()
    self.addCleanup(_remove, filename)
    with open(filename, "wb") as outp:
      Foo("bar").writeToFile(outp)
    with open(filename, "rb") as inp:
      self.assertEqual(Foo.readFromFile(inp).bar, "bar")
  def testAllSubClasses(self):
    """
    Test all Serializable subclasses making sure all the fields are initialized
    """
    self.addTypeEqualityFunc(numpy.ndarray, self.customAssertArrayEquals)
    self.addTypeEqualityFunc(tuple, self.customAssertSequenceEquals)
    self.addTypeEqualityFunc(list, self.customAssertSequenceEquals)
    self.addTypeEqualityFunc(dict, self.customAssertDictEquals)
    # Import all nupic modules to find Serializable subclasses
    packages = pkgutil.walk_packages(path=nupic.__path__,
                                     prefix=nupic.__name__ + ".")
    for _, modname, ispkg in packages:
      if not ispkg:
        try:
          importlib.import_module(modname)
        except:  # pylint: disable=W0702
          pass  # Ignore deprecated modules
    # Check every Serializable subclass
    for klass in _allSubclasses(Serializable):
      if inspect.isabstract(klass):
        continue
      # Make sure all serializable classes are accounted for
      self.assertIn(klass.__name__, SERIALIZABLE_SUBCLASSES)
      print klass.__name__
      testParams = SERIALIZABLE_SUBCLASSES[klass.__name__]
      # Skip test class
      if "skip" in testParams:
        continue
      # Instantiate class with test parameters
      if "params" in testParams:
        original = klass(**(testParams["params"]))
      else:
        original = klass()
      # Test read/write
      filename = tempfile.mktemp()
      self.addCleanup(_remove, filename)
      with open(filename, "wb") as outp:
        original.writeToFile(outp)
      with open(filename, "rb") as inp:
        serialized = klass.readFromFile(inp)
      expected = _getAttributes(original)
      actual = _getAttributes(serialized)
      # Make sure all fields were initialized
      self.assertEquals(actual, expected, klass.__name__)
| 8,921 | Python | .py | 258 | 28.953488 | 79 | 0.637071 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,066 | utils_test.py | numenta_nupic-legacy/tests/unit/nupic/utils_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for utils module."""
import pickle
import tempfile
import unittest
from nupic.utils import MovingAverage
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.movingaverage_capnp import MovingAverageProto
class UtilsTest(unittest.TestCase):
  """testing common.utils"""
  def testMovingAverage(self):
    """
    Test that the (internal) moving average maintains the averages correctly,
    even for null initial condition and when the number of values goes over
    windowSize. Pass in integers and floats.
    """
    historicalValues = []
    total = 0
    windowSize = 3
    newAverage, historicalValues, total = (
      MovingAverage.compute(historicalValues, total, 3, windowSize)
    )
    self.assertEqual(newAverage, 3.0)
    self.assertEqual(historicalValues, [3.0])
    self.assertEqual(total, 3.0)
    newAverage, historicalValues, total = (
      MovingAverage.compute(historicalValues, total, 4, windowSize)
    )
    self.assertEqual(newAverage, 3.5)
    self.assertListEqual(historicalValues, [3.0, 4.0])
    self.assertEqual(total, 7.0)
    newAverage, historicalValues, total = (
      MovingAverage.compute(historicalValues, total, 5.0, windowSize)
    )
    self.assertEqual(newAverage, 4.0)
    self.assertListEqual(historicalValues, [3.0, 4.0, 5.0])
    self.assertEqual(total, 12.0)
    # Ensure the first value gets popped
    newAverage, historicalValues, total = (
      MovingAverage.compute(historicalValues, total, 6.0, windowSize)
    )
    self.assertEqual(newAverage, 5.0)
    self.assertListEqual(historicalValues, [4.0, 5.0, 6.0])
    self.assertEqual(total, 15.0)
  def testMovingAverageInstance(self):
    """
    Test that the (internal) moving average maintains the averages correctly,
    even for null initial condition and when the number of values goes over
    windowSize. Pass in integers and floats.
    This is for the instance method next()
    """
    ma = MovingAverage(windowSize=3)
    newAverage = ma.next(3)
    self.assertEqual(newAverage, 3.0)
    self.assertListEqual(ma.getSlidingWindow(), [3.0])
    self.assertEqual(ma.total, 3.0)
    newAverage = ma.next(4)
    self.assertEqual(newAverage, 3.5)
    self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0])
    self.assertEqual(ma.total, 7.0)
    newAverage = ma.next(5)
    self.assertEqual(newAverage, 4.0)
    self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])
    self.assertEqual(ma.total, 12.0)
    # Ensure the first value gets popped
    newAverage = ma.next(6)
    self.assertEqual(newAverage, 5.0)
    self.assertListEqual(ma.getSlidingWindow(), [4.0, 5.0, 6.0])
    self.assertEqual(ma.total, 15.0)
  def testMovingAverageSlidingWindowInit(self):
    """
    Test the slidingWindow value is correctly assigned when initializing a
    new MovingAverage object.
    """
    # With existing historical values; same values as tested in
    # testMovingAverage()
    ma = MovingAverage(windowSize=3, existingHistoricalValues=[3.0, 4.0, 5.0])
    self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])
    # Without existing historical values
    ma = MovingAverage(windowSize=3)
    self.assertListEqual(ma.getSlidingWindow(), [])
  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testMovingAverageReadWrite(self):
    """Capnp round-trip must preserve window contents, total and behavior."""
    ma = MovingAverage(windowSize=3)
    ma.next(3)
    ma.next(4.5)
    ma.next(5)
    proto1 = MovingAverageProto.new_message()
    ma.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = MovingAverageProto.read(f)
    resurrectedMa = MovingAverage.read(proto2)
    newAverage = ma.next(6)
    self.assertEqual(newAverage, resurrectedMa.next(6))
    self.assertListEqual(ma.getSlidingWindow(),
                         resurrectedMa.getSlidingWindow())
    self.assertEqual(ma.total, resurrectedMa.total)
    # BUG FIX: this was `self.assertTrue(ma, resurrectedMa)`, which treats
    # the second argument as the failure *message* and only checks that `ma`
    # is truthy -- __eq__ was never invoked despite the original comment.
    self.assertEqual(ma, resurrectedMa)  # using the __eq__ method
  def testSerialization(self):
    """serialization using pickle"""
    ma = MovingAverage(windowSize=3)
    ma.next(3)
    ma.next(4.5)
    ma.next(5)
    stored = pickle.dumps(ma)
    restored = pickle.loads(stored)
    self.assertEqual(restored, ma)
    # The restored instance must also behave identically going forward
    self.assertEqual(ma.next(6), restored.next(6))
  def testEquals(self):
    """__eq__ must account for both windowSize and the stored values."""
    ma = MovingAverage(windowSize=3)
    maP = MovingAverage(windowSize=3)
    self.assertEqual(ma, maP)
    maN = MovingAverage(windowSize=10)
    self.assertNotEqual(ma, maN)
    ma = MovingAverage(windowSize=2, existingHistoricalValues=[3.0, 4.0, 5.0])
    maP = MovingAverage(windowSize=2, existingHistoricalValues=[3.0, 4.0, 5.0])
    self.assertEqual(ma, maP)
    maP.next(6)
    self.assertNotEqual(ma, maP)
    ma.next(6)
    self.assertEqual(ma, maP)
if __name__ == "__main__":
  # Allow running this test module directly, outside the full suite.
  unittest.main()
| 5,895 | Python | .py | 149 | 34.946309 | 84 | 0.706688 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,067 | dictutils_test.py | numenta_nupic-legacy/tests/unit/nupic/data/dictutils_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for dictutils module."""
import unittest2 as unittest
from nupic.data import dict_utils
from nupic.swarming.utils import rCopy
class TestDictUtils(unittest.TestCase):
  """Unit tests for dict_utils.rUpdate and swarming.utils.rCopy."""
  def testRUpdateEmpty(self):
    d = {}
    # Both empty.
    dict_utils.rUpdate(d, {})
    self.assertDictEqual(d, {})
    # Original empty.
    dict_utils.rUpdate(d, {"a": 1})
    self.assertDictEqual(d, {"a": 1})
    # Update empty.
    dict_utils.rUpdate(d, {})
    self.assertDictEqual(d, {"a": 1})
  def testRUpdateBasic(self):
    # Nested update overwrites "b" but leaves siblings "e" and "c" intact.
    d = {"a": {"b": 2, "e": 4},
         "c": {"d": 3}}
    dict_utils.rUpdate(d, {"a": {"b": 5}})
    self.assertDictEqual(d, {"a": {"b": 5, "e": 4},
                             "c": {"d": 3}})
  def testRCopyEmpty(self):
    d = {}
    self.assertDictEqual(d, rCopy(d))
    # NOTE(review): the 1-arg lambda differs from the (value, keys)
    # signature used below -- safe only because d is empty and the
    # transform is never called; verify against rCopy's contract.
    self.assertDictEqual(d, rCopy(d, lambda x: 2* x))
  def testRCopyFlatDict(self):
    d = {"a": 1, "b": 2, "c": 3}
    self.assertDictEqual(d, rCopy(d))
    def f(value, _keys):
      # Transform applied to every leaf value during the copy.
      return value * 2
    expected = {"a": 2, "b": 4, "c": 6}
    self.assertDictEqual(expected, rCopy(d, f))
  def testRCopyNestedDict(self):
    d = {"a": {"b": {"c": 1}}}
    self.assertDictEqual(d, rCopy(d))
    def f(value, _keys):
      return value * 2
    expected = {"a": {"b": {"c": 2}}}
    self.assertDictEqual(expected, rCopy(d, f))
  def testRCopyComplexNestedDict(self):
    # Doubling a list concatenates it; doubling a string repeats it.
    d = {"a": {"b": {"c": [1, 2, 3]}, "d": "Hello", "e": 17}}
    self.assertDictEqual(d, rCopy(d))
    def f(value, _keys):
      return value * 2
    expected = {"a": {"b": {"c": [1, 2, 3, 1, 2, 3]},
                      "d": "HelloHello", "e": 34}}
    self.assertDictEqual(expected, rCopy(d, f))
if __name__ == "__main__":
  # Allow running this test module directly, outside the full suite.
  unittest.main()
| 2,706 | Python | .py | 70 | 34.185714 | 72 | 0.599847 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,068 | filters_test.py | numenta_nupic-legacy/tests/unit/nupic/data/filters_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for filters module.
NOTE: This test was migrated from the old repo and could use some refactoring.
"""
from datetime import datetime
import numpy
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.regions.record_sensor import RecordSensor
from nupic.data.file_record_stream import FileRecordStream
from nupic.encoders import MultiEncoder
from nupic.data.filters import DeltaFilter
class FiltersTest(unittest.TestCase):
  """Tests for the pre-encoding DeltaFilter applied by RecordSensor."""
  @unittest.skip("Disabled until we figure out why it is failing in internal"
                 " tests")
  def testDeltaFilter(self):
    """
    Input data (delta.csv) and the deltas it should generate:

      "t"        "s"    ->  "dt"    "ds"
      t          10          X
      t+1s       20          1s     10
      t+1d       50          86399  30
    r t+1d+1s    60          X
    r t+1d+3s    65          2s     5

    (r = explicit reset; X rows start a sequence, so no delta is emitted
    and they are skipped once the delta filters are installed)
    """
    r = RecordSensor()
    filename = resource_filename("nupic.datafiles", "extra/qa/delta.csv")
    datasource = FileRecordStream(filename)
    r.dataSource = datasource
    n = 50
    encoder = MultiEncoder({'blah': dict(fieldname="s", type='ScalarEncoder',
                                         n=n, w=11, minval=0, maxval=100)})
    r.encoder = encoder
    # Test #1 -- no deltas
    # Make sure we get a reset when the gym changes
    resetOut = numpy.zeros((1,), dtype='float')
    sequenceIdOut = numpy.zeros((1,), dtype='float')
    dataOut = numpy.zeros((n,), dtype='float')
    sourceOut = numpy.zeros((1,), dtype='float')
    categoryOut = numpy.zeros((1,), dtype='float')
    outputs = dict(resetOut=resetOut,
                   sourceOut = sourceOut,
                   sequenceIdOut = sequenceIdOut,
                   dataOut = dataOut,
                   categoryOut = categoryOut)
    inputs = dict()
    r.verbosity=0
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16,
                                       minute=8, second=0))
    self.assertEqual(lr['s'], 10)
    self.assertEqual(lr['_reset'], 1)
    # Without filters installed no delta fields are present
    self.assertTrue('dt' not in lr)
    self.assertTrue('ds' not in lr)
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16,
                                       minute=8, second=1))
    self.assertEqual(lr['s'], 20)
    self.assertEqual(lr['_reset'], 0)
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
                                       minute=8, second=0))
    self.assertEqual(lr['s'], 50)
    self.assertEqual(lr['_reset'], 0)
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
                                       minute=8, second=1))
    self.assertEqual(lr['s'], 60)
    self.assertEqual(lr['_reset'], 1)
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
                                       minute=8, second=3))
    self.assertEqual(lr['s'], 65)
    self.assertEqual(lr['_reset'], 0)
    # Add filters
    r.preEncodingFilters = [DeltaFilter("s", "ds"), DeltaFilter("t", "dt")]
    r.rewind()
    # skip first record, which has a reset
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16,
                                       minute=8, second=1))
    self.assertEqual(lr['s'], 20)
    # this record should have a reset since it is first of a sequence
    self.assertEqual(lr['_reset'], 1)
    self.assertEqual(lr['dt'], 1)
    self.assertEqual(lr['ds'], 10)
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
                                       minute=8, second=0))
    self.assertEqual(lr['s'], 50)
    self.assertEqual(lr['_reset'], 0)
    self.assertEqual(lr['dt'], 3600 * 24 - 1)
    self.assertEqual(lr['ds'], 30)
    # next reset record is skipped
    r.compute(inputs, outputs)
    lr = r.lastRecord
    self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
                                       minute=8, second=3))
    self.assertEqual(lr['s'], 65)
    self.assertEqual(lr['_reset'], 1)
    self.assertEqual(lr['dt'], 2)
    self.assertEqual(lr['ds'], 5)
if __name__ == "__main__":
  # Allow running this test module directly, outside the full suite.
  unittest.main()
| 5,567 | Python | .py | 130 | 35.3 | 78 | 0.599149 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,069 | inference_shifter_test.py | numenta_nupic-legacy/tests/unit/nupic/data/inference_shifter_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for InferenceShifter."""
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.opf_utils import InferenceElement, ModelResult
from nupic.support.unittesthelpers.testcasebase import (TestCaseBase,
unittest)
class TestInferenceShifter(TestCaseBase):
  """Verify InferenceShifter aligns each inference with the row it predicts."""
  def _shiftAndCheck(self, inferences, expectedOutput):
    # Feed every inference dict through a single shifter instance (it keeps
    # history across calls) and compare each shifted result row-by-row.
    inferenceShifter = InferenceShifter()
    for inference, expected in zip(inferences, expectedOutput):
      inputResult = ModelResult(inferences=inference)
      outputResult = inferenceShifter.shift(inputResult)
      self.assertEqual(outputResult.inferences, expected)
  def testNoShift(self):
    # Elements describing the current row pass through unshifted.
    for element in (InferenceElement.anomalyScore,
                    InferenceElement.classification,
                    InferenceElement.classConfidences):
      inferences = [
          {element: 1},
          {element: 2},
          {element: 3},
      ]
      expectedOutput = [
          {element: 1},
          {element: 2},
          {element: 3},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
  def testNoShiftMultipleValues(self):
    for element in (InferenceElement.anomalyScore,
                    InferenceElement.classification,
                    InferenceElement.classConfidences):
      inferences = [
          {element: [1, 2, 3]},
          {element: [4, 5, 6]},
          {element: [5, 6, 7]},
      ]
      expectedOutput = [
          {element: [1, 2, 3]},
          {element: [4, 5, 6]},
          {element: [5, 6, 7]},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
  def testSingleShift(self):
    # One-step-ahead elements are delayed by one row; first row yields None.
    for element in (InferenceElement.prediction,
                    InferenceElement.encodings):
      inferences = [
          {element: 1},
          {element: 2},
          {element: 3},
      ]
      expectedOutput = [
          {element: None},
          {element: 1},
          {element: 2},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
  def testSingleShiftMultipleValues(self):
    for element in (InferenceElement.prediction,
                    InferenceElement.encodings):
      inferences = [
          {element: [1, 2, 3]},
          {element: [4, 5, 6]},
          {element: [5, 6, 7]},
      ]
      expectedOutput = [
          {element: [None, None, None]},
          {element: [1, 2, 3]},
          {element: [4, 5, 6]},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
  def testMultiStepShift(self):
    # A {2: value} prediction refers two rows ahead, so it is delayed twice.
    for element in (InferenceElement.multiStepPredictions,
                    InferenceElement.multiStepBestPredictions):
      inferences = [
          {element: {2: 1}},
          {element: {2: 2}},
          {element: {2: 3}},
          {element: {2: 4}},
      ]
      expectedOutput = [
          {element: {2: None}},
          {element: {2: None}},
          {element: {2: 1}},
          {element: {2: 2}},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
  def testMultiStepShiftMultipleValues(self):
    for element in (InferenceElement.multiStepPredictions,
                    InferenceElement.multiStepBestPredictions):
      inferences = [
          {element: {2: [1, 11]}},
          {element: {2: [2, 12]}},
          {element: {2: [3, 13]}},
          {element: {2: [4, 14]}},
      ]
      expectedOutput = [
          {element: {2: None}},
          {element: {2: None}},
          {element: {2: [1, 11]}},
          {element: {2: [2, 12]}},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
  def testDifferentMultiStepsShift(self):
    # Each step key (2 and 3) is shifted independently by its own horizon.
    for element in (InferenceElement.multiStepPredictions,
                    InferenceElement.multiStepBestPredictions):
      inferences = [
          {element: {2: 1, 3: 5}},
          {element: {2: 2, 3: 6}},
          {element: {2: 3, 3: 7}},
          {element: {2: 4, 3: 8}},
      ]
      expectedOutput = [
          {element: {2: None, 3: None}},
          {element: {2: None, 3: None}},
          {element: {2: 1, 3: None}},
          {element: {2: 2, 3: 5}},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
  def testDifferentMultiStepsShiftMultipleValues(self):
    for element in (InferenceElement.multiStepPredictions,
                    InferenceElement.multiStepBestPredictions):
      inferences = [
          {element: {2: [1, 11], 3: [5, 15]}},
          {element: {2: [2, 12], 3: [6, 16]}},
          {element: {2: [3, 13], 3: [7, 17]}},
          {element: {2: [4, 14], 3: [8, 18]}},
      ]
      expectedOutput = [
          {element: {2: None, 3: None}},
          {element: {2: None, 3: None}},
          {element: {2: [1, 11], 3: None}},
          {element: {2: [2, 12], 3: [5, 15]}},
      ]
      self._shiftAndCheck(inferences, expectedOutput)
if __name__ == '__main__':
  # Allow running this test module directly, outside the full suite.
  unittest.main()
| 5,815 | Python | .py | 156 | 28.910256 | 72 | 0.574592 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,070 | record_stream_test.py | numenta_nupic-legacy/tests/unit/nupic/data/record_stream_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for nupic.data.record_stream."""
from datetime import datetime
import unittest
import mock
from nupic.data.field_meta import FieldMetaInfo, FieldMetaType, FieldMetaSpecial
from nupic.data.record_stream import ModelRecordEncoder, RecordStreamIface
class ModelRecordEncoderTest(unittest.TestCase):
def testEmptyFieldsArgRaisesValueErrorInConstructor(self):
with self.assertRaises(ValueError):
ModelRecordEncoder(fields=[])
def testEncoderWithSequenceAndResetFields(self):
fields = [
FieldMetaInfo('name', FieldMetaType.string,
FieldMetaSpecial.none),
FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp),
FieldMetaInfo('integer', FieldMetaType.integer,
FieldMetaSpecial.none),
FieldMetaInfo('real', FieldMetaType.float,
FieldMetaSpecial.none),
FieldMetaInfo('reset', FieldMetaType.integer,
FieldMetaSpecial.reset),
FieldMetaInfo('sid', FieldMetaType.string,
FieldMetaSpecial.sequence),
FieldMetaInfo('categories', FieldMetaType.list,
FieldMetaSpecial.category)
]
encoder = ModelRecordEncoder(fields=fields)
result = encoder.encode(
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 99,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_1',
'timestamp': datetime(2010, 3, 1, 0, 0),
'integer': 5,
'real': 6.5,
'reset': 1,
'sid': 99,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 1,
'_sequenceId': 99,
'_timestamp': datetime(2010, 3, 1, 0, 0),
'_timestampRecordIdx': None })
def testEncoderWithResetFieldWithoutSequenceField(self):
fields = [
FieldMetaInfo('name', FieldMetaType.string,
FieldMetaSpecial.none),
FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp),
FieldMetaInfo('integer', FieldMetaType.integer,
FieldMetaSpecial.none),
FieldMetaInfo('real', FieldMetaType.float,
FieldMetaSpecial.none),
FieldMetaInfo('reset', FieldMetaType.integer,
FieldMetaSpecial.reset),
FieldMetaInfo('categories', FieldMetaType.list,
FieldMetaSpecial.category)
]
encoder = ModelRecordEncoder(fields=fields)
result = encoder.encode(
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_1',
'timestamp': datetime(2010, 3, 1, 0, 0),
'integer': 5,
'real': 6.5,
'reset': 1,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 1,
'_sequenceId': 0,
'_timestamp': datetime(2010, 3, 1, 0, 0),
'_timestampRecordIdx': None })
# One more time to verify incremeting sequence id
result = encoder.encode(
['rec_2', datetime(day=2, month=3, year=2010), 5, 6.5, 1,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_2',
'timestamp': datetime(2010, 3, 2, 0, 0),
'integer': 5,
'real': 6.5,
'reset': 1,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 1,
'_sequenceId': 1,
'_timestamp': datetime(2010, 3, 2, 0, 0),
'_timestampRecordIdx': None })
# Now with reset turned off, expecting no change to sequence id
result = encoder.encode(
['rec_3', datetime(day=3, month=3, year=2010), 5, 6.5, 0,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_3',
'timestamp': datetime(2010, 3, 3, 0, 0),
'integer': 5,
'real': 6.5,
'reset': 0,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 0,
'_sequenceId': 1,
'_timestamp': datetime(2010, 3, 3, 0, 0),
'_timestampRecordIdx': None })
# Now check that rewind resets sequence id
encoder.rewind()
result = encoder.encode(
['rec_4', datetime(day=4, month=3, year=2010), 5, 6.5, 1,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_4',
'timestamp': datetime(2010, 3, 4, 0, 0),
'integer': 5,
'real': 6.5,
'reset': 1,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 1,
'_sequenceId': 0,
'_timestamp': datetime(2010, 3, 4, 0, 0),
'_timestampRecordIdx': None })
def testEncoderWithSequenceFieldWithoutResetField(self):
fields = [
FieldMetaInfo('name', FieldMetaType.string,
FieldMetaSpecial.none),
FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp),
FieldMetaInfo('integer', FieldMetaType.integer,
FieldMetaSpecial.none),
FieldMetaInfo('real', FieldMetaType.float,
FieldMetaSpecial.none),
FieldMetaInfo('sid', FieldMetaType.string,
FieldMetaSpecial.sequence),
FieldMetaInfo('categories', FieldMetaType.list,
FieldMetaSpecial.category)
]
encoder = ModelRecordEncoder(fields=fields)
# _reset should be 1 the first time
result = encoder.encode(
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 99,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_1',
'timestamp': datetime(2010, 3, 1, 0, 0),
'integer': 5,
'real': 6.5,
'sid': 99,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 1,
'_sequenceId': 99,
'_timestamp': datetime(2010, 3, 1, 0, 0),
'_timestampRecordIdx': None })
# _reset should be 0 when same sequence id is repeated
result = encoder.encode(
['rec_2', datetime(day=2, month=3, year=2010), 5, 6.5, 99,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_2',
'timestamp': datetime(2010, 3, 2, 0, 0),
'integer': 5,
'real': 6.5,
'sid': 99,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 0,
'_sequenceId': 99,
'_timestamp': datetime(2010, 3, 2, 0, 0),
'_timestampRecordIdx': None })
# _reset should be 1 when sequence id changes
result = encoder.encode(
['rec_3', datetime(day=2, month=3, year=2010), 5, 6.5, 100,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_3',
'timestamp': datetime(2010, 3, 2, 0, 0),
'integer': 5,
'real': 6.5,
'sid': 100,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 1,
'_sequenceId': 100,
'_timestamp': datetime(2010, 3, 2, 0, 0),
'_timestampRecordIdx': None })
def testEncoderWithoutResetAndSequenceFields(self):
fields = [
FieldMetaInfo('name', FieldMetaType.string,
FieldMetaSpecial.none),
FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp),
FieldMetaInfo('integer', FieldMetaType.integer,
FieldMetaSpecial.none),
FieldMetaInfo('real', FieldMetaType.float,
FieldMetaSpecial.none),
FieldMetaInfo('categories', FieldMetaType.list,
FieldMetaSpecial.category)
]
encoder = ModelRecordEncoder(fields=fields)
result = encoder.encode(
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_1',
'timestamp': datetime(2010, 3, 1, 0, 0),
'integer': 5,
'real': 6.5,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 0,
'_sequenceId': 0,
'_timestamp': datetime(2010, 3, 1, 0, 0),
'_timestampRecordIdx': None })
# One more time to verify that sequence id is still 0
result = encoder.encode(
['rec_2', datetime(day=2, month=3, year=2010), 5, 6.5,
[0, 1, 2]])
self.assertEqual(
result,
{
'name': 'rec_2',
'timestamp': datetime(2010, 3, 2, 0, 0),
'integer': 5,
'real': 6.5,
'categories': [0, 1, 2],
'_category': [0, 1, 2],
'_reset': 0,
'_sequenceId': 0,
'_timestamp': datetime(2010, 3, 2, 0, 0),
'_timestampRecordIdx': None })
class RecordStreamIfaceTest(unittest.TestCase):
  """Tests for the concrete behavior that RecordStreamIface itself provides
  (getNextRecordDict sequence-id/reset bookkeeping and rewind), exercised
  through a minimal concrete subclass whose getNextRecord is mock-patched.
  """

  class MyRecordStream(RecordStreamIface):
    """Record stream class for testing functionality of the RecordStreamIface
    abstract base class
    """

    def __init__(self, fieldsMeta):
      super(RecordStreamIfaceTest.MyRecordStream, self).__init__()
      self._fieldsMeta = fieldsMeta
      # Field names derived once from the metadata, preserving order
      self._fieldNames = tuple(f.name for f in fieldsMeta)

    def getNextRecord(self, useCache=True):
      """[ABC method implementation]

      retval: a data row (a list or tuple) if available; None, if no more
      records in the table (End of Stream - EOS); empty sequence (list or
      tuple) when timing out while waiting for the next record.
      """
      # The tests will patch this method (via mock.patch.object) to feed data
      pass

    def getFieldNames(self):
      """[ABC method implementation]"""
      return self._fieldNames

    def getFields(self):
      """[ABC method implementation]"""
      return self._fieldsMeta

    # Satisfy Abstract Base Class requirements for the ABC RecordStreamIface
    # methods that are no-ops for the currently-implemented tests.
    # Assigning None overrides the abstract methods so the class can be
    # instantiated without providing real implementations.
    close = None
    getRecordsRange = None
    getNextRecordIdx = None
    getLastRecords = None
    removeOldData = None
    appendRecord=None
    appendRecords = None
    getBookmark = None
    recordsExistAfter = None
    seekFromEnd = None
    getStats = None
    clearStats = None
    getError = None
    setError = None
    isCompleted = None
    setCompleted = None
    setTimeout = None
    flush = None

  def testRewindBeforeModelRecordEncoderIsCreated(self):
    # The internal ModelRecordEncoder is created lazily; rewind() must
    # tolerate it not existing yet.
    fields = [
      FieldMetaInfo('name', FieldMetaType.string,
                    FieldMetaSpecial.none),
    ]
    stream = self.MyRecordStream(fields)

    # Check that it doesn't crash by trying to operate on an absent encoder
    self.assertIsNone(stream._modelRecordEncoder)

    stream.rewind()

  def testGetNextRecordDictWithResetFieldWithoutSequenceField(self):
    # With an explicit reset field and no sequence field, the stream derives
    # _sequenceId itself: it increments on each reset=1 record and is
    # restored to 0 by rewind().
    fields = [
      FieldMetaInfo('name', FieldMetaType.string,
                    FieldMetaSpecial.none),
      FieldMetaInfo('timestamp', FieldMetaType.datetime,
                    FieldMetaSpecial.timestamp),
      FieldMetaInfo('integer', FieldMetaType.integer,
                    FieldMetaSpecial.none),
      FieldMetaInfo('real', FieldMetaType.float,
                    FieldMetaSpecial.none),
      FieldMetaInfo('reset', FieldMetaType.integer,
                    FieldMetaSpecial.reset),
      FieldMetaInfo('categories', FieldMetaType.list,
                    FieldMetaSpecial.category)
    ]

    stream = self.MyRecordStream(fields)

    with mock.patch.object(
        stream, 'getNextRecord', autospec=True,
        return_value=['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1,
                      [0, 1, 2]]):
      result = stream.getNextRecordDict()

    self.assertEqual(
      result,
      {
        'name': 'rec_1',
        'timestamp': datetime(2010, 3, 1, 0, 0),
        'integer': 5,
        'real': 6.5,
        'reset': 1,
        'categories': [0, 1, 2],
        '_category': [0, 1, 2],
        '_reset': 1,
        '_sequenceId': 0,
        '_timestamp': datetime(2010, 3, 1, 0, 0),
        '_timestampRecordIdx': None })

    # One more time to verify incrementing sequence id
    with mock.patch.object(
        stream, 'getNextRecord', autospec=True,
        return_value=['rec_2', datetime(day=2, month=3, year=2010), 5, 6.5, 1,
                      [0, 1, 2]]):
      result = stream.getNextRecordDict()

    self.assertEqual(
      result,
      {
        'name': 'rec_2',
        'timestamp': datetime(2010, 3, 2, 0, 0),
        'integer': 5,
        'real': 6.5,
        'reset': 1,
        'categories': [0, 1, 2],
        '_category': [0, 1, 2],
        '_reset': 1,
        '_sequenceId': 1,
        '_timestamp': datetime(2010, 3, 2, 0, 0),
        '_timestampRecordIdx': None })

    # Now with reset turned off, expecting no change to sequence id
    with mock.patch.object(
        stream, 'getNextRecord', autospec=True,
        return_value=['rec_3', datetime(day=3, month=3, year=2010), 5, 6.5, 0,
                      [0, 1, 2]]):
      result = stream.getNextRecordDict()

    self.assertEqual(
      result,
      {
        'name': 'rec_3',
        'timestamp': datetime(2010, 3, 3, 0, 0),
        'integer': 5,
        'real': 6.5,
        'reset': 0,
        'categories': [0, 1, 2],
        '_category': [0, 1, 2],
        '_reset': 0,
        '_sequenceId': 1,
        '_timestamp': datetime(2010, 3, 3, 0, 0),
        '_timestampRecordIdx': None })

    # Now check that rewind resets sequence id
    with mock.patch.object(
        stream, 'getNextRecord', autospec=True,
        return_value=['rec_4', datetime(day=4, month=3, year=2010), 5, 6.5, 1,
                      [0, 1, 2]]):
      stream.rewind()
      result = stream.getNextRecordDict()

    self.assertEqual(
      result,
      {
        'name': 'rec_4',
        'timestamp': datetime(2010, 3, 4, 0, 0),
        'integer': 5,
        'real': 6.5,
        'reset': 1,
        'categories': [0, 1, 2],
        '_category': [0, 1, 2],
        '_reset': 1,
        '_sequenceId': 0,
        '_timestamp': datetime(2010, 3, 4, 0, 0),
        '_timestampRecordIdx': None })
# Support running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 15,103 | Python | .py | 421 | 27.308789 | 81 | 0.571487 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,071 | functionsource_test.py | numenta_nupic-legacy/tests/unit/nupic/data/functionsource_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Unit tests for functionsource.
"""
import pickle
import unittest
from nupic.data import FunctionSource
def dataFunction(stat):
  """Record-producing callback used by the FunctionSource tests.

  Returns a record dict with zeroed "reset", "sequence" and "data" fields.
  When a mutable ``stat`` dict is supplied, an incrementing counter is kept
  in ``stat["val"]`` (mutating the caller's dict) and mirrored into the
  returned record under "val".
  """
  record = dict.fromkeys(("reset", "sequence", "data"), 0)
  if stat is None:
    return record
  counter = stat.get("val", 0) + 1
  stat["val"] = counter
  record["val"] = counter
  return record
class FunctionSourceTest(unittest.TestCase):
  """Exercises FunctionSource across reset/sequence field configurations,
  shared-state mutation, and pickling.
  """

  def _checkSource(self, resetFieldName, sequenceIdFieldName, state=None):
    # Build a source, confirm construction succeeded, and confirm it can
    # produce at least one record dict.  Returns the source for reuse.
    source = FunctionSource(dataFunction, state=state,
                            resetFieldName=resetFieldName,
                            sequenceIdFieldName=sequenceIdFieldName)
    self.assertIsNotNone(source)
    self.assertIsNotNone(source.getNextRecordDict())
    return source

  def testDefaultArgs(self):
    self._checkSource(resetFieldName=None, sequenceIdFieldName=None)

  def testResetField(self):
    self._checkSource(resetFieldName="reset", sequenceIdFieldName=None)

  def testSequenceField(self):
    self._checkSource(resetFieldName=None, sequenceIdFieldName="sequence")

  def testResetAndSequenceFields(self):
    self._checkSource(resetFieldName="reset",
                      sequenceIdFieldName="sequence")

  def testState(self):
    state = dict(val=100)
    source = self._checkSource(resetFieldName="reset",
                               sequenceIdFieldName="sequence",
                               state=state)
    # Two more records: the shared state dict is bumped on every call.
    source.getNextRecordDict()
    source.getNextRecordDict()
    self.assertEqual(103, state["val"])

  def testPickle(self):
    source = self._checkSource(resetFieldName="reset",
                               sequenceIdFieldName="sequence",
                               state=dict(val=100))
    blob = pickle.dumps(source)
    self.assertIsNotNone(blob)
    clone = pickle.loads(blob)
    self.assertIsNotNone(clone)
    # The clone carries its own copy of the state and keeps counting.
    clone.getNextRecordDict()
    clone.getNextRecordDict()
    self.assertEqual(103, clone.state["val"])
# Support running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 3,217 | Python | .py | 83 | 33.650602 | 74 | 0.679318 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,072 | fieldmeta_test.py | numenta_nupic-legacy/tests/unit/nupic/data/fieldmeta_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for nupic.data.fieldmeta."""
import unittest2 as unittest
from nupic.data.field_meta import FieldMetaInfo, FieldMetaType, FieldMetaSpecial
class FieldMetaTest(unittest.TestCase):
  """FieldMetaInfo unit tests."""

  def testFieldMetaInfo(self):
    """Round-trips through the file-field factory helpers."""
    # A FieldMetaInfo built from a file field's meta-data tuple compares
    # equal to that tuple.
    element = ("pounds", FieldMetaType.float, FieldMetaSpecial.none)
    info = FieldMetaInfo.createFromFileFieldElement(element)
    self.assertEqual(element, info)

    # The list factory preserves order and element equality.
    elements = [("pounds", FieldMetaType.float, FieldMetaSpecial.none),
                ("price", FieldMetaType.float, FieldMetaSpecial.none),
                ("id", FieldMetaType.string, FieldMetaSpecial.sequence),
                ("date", FieldMetaType.datetime, FieldMetaSpecial.timestamp),
               ]
    infos = FieldMetaInfo.createListFromFileFieldList(elements)
    self.assertEqual(elements, infos)

  def testFieldMetaInfoRaisesValueErrorOnInvalidFieldType(self):
    with self.assertRaises(ValueError):
      FieldMetaInfo("fieldName", "bogus-type", FieldMetaSpecial.none)

  def testFieldMetaInfoRaisesValueErrorOnInvalidFieldSpecial(self):
    with self.assertRaises(ValueError):
      FieldMetaInfo("fieldName", FieldMetaType.integer, "bogus-special")

  def testFieldMetaSpecialIsValid(self):
    # Every declared special designation validates; arbitrary text does not.
    for special in (FieldMetaSpecial.none,
                    FieldMetaSpecial.reset,
                    FieldMetaSpecial.sequence,
                    FieldMetaSpecial.timestamp,
                    FieldMetaSpecial.category,
                    FieldMetaSpecial.learning):
      self.assertEqual(FieldMetaSpecial.isValid(special), True)
    self.assertEqual(FieldMetaSpecial.isValid("bogus-special"), False)

  def testFieldMetaTypeIsValid(self):
    # Every declared field type validates; arbitrary text does not.
    for fieldType in (FieldMetaType.string,
                      FieldMetaType.datetime,
                      FieldMetaType.integer,
                      FieldMetaType.float,
                      FieldMetaType.boolean,
                      FieldMetaType.list,
                      FieldMetaType.sdr):
      self.assertEqual(FieldMetaType.isValid(fieldType), True)
    self.assertEqual(FieldMetaType.isValid("bogus-type"), False)
# Support running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 3,531 | Python | .py | 64 | 51.15625 | 80 | 0.75167 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,073 | file_record_stream_test.py | numenta_nupic-legacy/tests/unit/nupic/data/file_record_stream_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import tempfile
import unittest
from datetime import datetime
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaInfo, FieldMetaType, FieldMetaSpecial
from nupic.data.file_record_stream import FileRecordStream
from nupic.data.utils import (
parseTimestamp, serializeTimestamp, escape, unescape)
def _getTempFileName():
"""Creates unique file name that starts with 'test' and ends with '.txt'."""
handle = tempfile.NamedTemporaryFile(prefix='test', suffix='.txt', dir='.')
filename = handle.name
handle.close()
return filename
class TestFileRecordStream(unittest.TestCase):
def testBasic(self):
"""Runs basic FileRecordStream tests."""
filename = _getTempFileName()
# Write a standard file
fields = [FieldMetaInfo('name', FieldMetaType.string,
FieldMetaSpecial.none),
FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp),
FieldMetaInfo('integer', FieldMetaType.integer,
FieldMetaSpecial.none),
FieldMetaInfo('real', FieldMetaType.float,
FieldMetaSpecial.none),
FieldMetaInfo('reset', FieldMetaType.integer,
FieldMetaSpecial.reset),
FieldMetaInfo('sid', FieldMetaType.string,
FieldMetaSpecial.sequence),
FieldMetaInfo('categoryField', FieldMetaType.integer,
FieldMetaSpecial.category),]
fieldNames = ['name', 'timestamp', 'integer', 'real', 'reset', 'sid',
'categoryField']
print 'Creating temp file:', filename
with FileRecordStream(streamID=filename, write=True, fields=fields) as s:
self.assertEqual(0, s.getDataRowCount())
# Records
records = (
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 'seq-1', 10],
['rec_2', datetime(day=2, month=3, year=2010), 8, 7.5, 0, 'seq-1', 11],
['rec_3', datetime(day=3, month=3, year=2010), 12, 8.5, 0, 'seq-1', 12])
self.assertEqual(fields, s.getFields())
self.assertEqual(0, s.getNextRecordIdx())
print 'Writing records ...'
for r in records:
print list(r)
s.appendRecord(list(r))
self.assertEqual(3, s.getDataRowCount())
recordsBatch = (
['rec_4', datetime(day=4, month=3, year=2010), 2, 9.5, 1, 'seq-1', 13],
['rec_5', datetime(day=5, month=3, year=2010), 6, 10.5, 0, 'seq-1', 14],
['rec_6', datetime(day=6, month=3, year=2010), 11, 11.5, 0, 'seq-1', 15]
)
print 'Adding batch of records...'
for rec in recordsBatch:
print rec
s.appendRecords(recordsBatch)
self.assertEqual(6, s.getDataRowCount())
with FileRecordStream(filename) as s:
# Read the standard file
self.assertEqual(6, s.getDataRowCount())
self.assertEqual(fieldNames, s.getFieldNames())
# Note! this is the number of records read so far
self.assertEqual(0, s.getNextRecordIdx())
readStats = s.getStats()
print 'Got stats:', readStats
expectedStats = {
'max': [None, None, 12, 11.5, 1, None, 15],
'min': [None, None, 2, 6.5, 0, None, 10]
}
self.assertEqual(expectedStats, readStats)
readRecords = []
print 'Reading records ...'
while True:
r = s.getNextRecord()
print r
if r is None:
break
readRecords.append(r)
allRecords = records + recordsBatch
for r1, r2 in zip(allRecords, readRecords):
self.assertEqual(r1, r2)
def testMultipleClasses(self):
"""Runs FileRecordStream tests with multiple category fields."""
filename = _getTempFileName()
# Write a standard file
fields = [
FieldMetaInfo('name', FieldMetaType.string,
FieldMetaSpecial.none),
FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp),
FieldMetaInfo('integer', FieldMetaType.integer,
FieldMetaSpecial.none),
FieldMetaInfo('real', FieldMetaType.float,
FieldMetaSpecial.none),
FieldMetaInfo('reset', FieldMetaType.integer,
FieldMetaSpecial.reset),
FieldMetaInfo('sid', FieldMetaType.string,
FieldMetaSpecial.sequence),
FieldMetaInfo('categories', FieldMetaType.list,
FieldMetaSpecial.category)]
fieldNames = ['name', 'timestamp', 'integer', 'real', 'reset', 'sid',
'categories']
print 'Creating temp file:', filename
with FileRecordStream(streamID=filename, write=True, fields=fields) as s:
self.assertEqual(0, s.getDataRowCount())
# Records
records = (
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 'seq-1',
[0, 1, 2]],
['rec_2', datetime(day=2, month=3, year=2010), 8, 7.5, 0, 'seq-1',
[3, 4, 5,]],
['rec_3', datetime(day=3, month=3, year=2010), 2, 8.5, 0, 'seq-1',
[6, 7, 8,]])
self.assertEqual(fields, s.getFields())
self.assertEqual(0, s.getNextRecordIdx())
print 'Writing records ...'
for r in records:
print r
s.appendRecord(r)
self.assertEqual(3, s.getDataRowCount())
recordsBatch = (
['rec_4', datetime(day=4, month=3, year=2010), 2, 9.5, 1, 'seq-1',
[2, 3, 4]],
['rec_5', datetime(day=5, month=3, year=2010), 6, 10.5, 0, 'seq-1',
[3, 4, 5]],
['rec_6', datetime(day=6, month=3, year=2010), 11, 11.5, 0, 'seq-1',
[4, 5, 6]])
print 'Adding batch of records...'
for rec in recordsBatch:
print rec
s.appendRecords(recordsBatch)
self.assertEqual(6, s.getDataRowCount())
with FileRecordStream(filename) as s:
# Read the standard file
self.assertEqual(6, s.getDataRowCount())
self.assertEqual(fieldNames, s.getFieldNames())
# Note! this is the number of records read so far
self.assertEqual(0, s.getNextRecordIdx())
readStats = s.getStats()
print 'Got stats:', readStats
expectedStats = {
'max': [None, None, 11, 11.5, 1, None, None],
'min': [None, None, 2, 6.5, 0, None, None]
}
self.assertEqual(expectedStats, readStats)
readRecords = []
print 'Reading records ...'
while True:
r = s.getNextRecord()
print r
if r is None:
break
readRecords.append(r)
expectedRecords = (
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 'seq-1',
[0, 1, 2]],
['rec_2', datetime(day=2, month=3, year=2010), 8, 7.5, 0, 'seq-1',
[3, 4, 5]],
['rec_3', datetime(day=3, month=3, year=2010), 2, 8.5, 0, 'seq-1',
[6, 7, 8]],
['rec_4', datetime(day=4, month=3, year=2010), 2, 9.5, 1, 'seq-1',
[2, 3, 4]],
['rec_5', datetime(day=5, month=3, year=2010), 6, 10.5, 0, 'seq-1',
[3, 4, 5]],
['rec_6', datetime(day=6, month=3, year=2010), 11, 11.5, 0, 'seq-1',
[4, 5, 6]])
for r1, r2 in zip(expectedRecords, readRecords):
self.assertEqual(r1, r2)
def testEscapeUnescape(self):
s = '1,2\n4,5'
e = escape(s)
u = unescape(e)
self.assertEqual(u, s)
def testParseSerializeTimestamp(self):
t = datetime.now()
s = serializeTimestamp(t)
self.assertEqual(t, parseTimestamp(s))
def testBadDataset(self):
filename = _getTempFileName()
print 'Creating tempfile:', filename
# Write bad dataset with records going backwards in time
fields = [FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp)]
o = FileRecordStream(streamID=filename, write=True, fields=fields)
# Records
records = (
[datetime(day=3, month=3, year=2010)],
[datetime(day=2, month=3, year=2010)])
o.appendRecord(records[0])
o.appendRecord(records[1])
o.close()
# Write bad dataset with broken sequences
fields = [FieldMetaInfo('sid', FieldMetaType.integer,
FieldMetaSpecial.sequence)]
o = FileRecordStream(streamID=filename, write=True, fields=fields)
# Records
records = ([1], [2], [1])
o.appendRecord(records[0])
o.appendRecord(records[1])
self.assertRaises(Exception, o.appendRecord, (records[2],))
o.close()
def testMissingValues(self):
print "Beginning Missing Data test..."
filename = _getTempFileName()
# Some values missing of each type
# read dataset from disk, retrieve values
# string should return empty string, numeric types sentinelValue
print 'Creating tempfile:', filename
# write dataset to disk with float, int, and string fields
fields = [FieldMetaInfo('timestamp', FieldMetaType.datetime,
FieldMetaSpecial.timestamp),
FieldMetaInfo('name', FieldMetaType.string,
FieldMetaSpecial.none),
FieldMetaInfo('integer', FieldMetaType.integer,
FieldMetaSpecial.none),
FieldMetaInfo('real', FieldMetaType.float,
FieldMetaSpecial.none)]
s = FileRecordStream(streamID=filename, write=True, fields=fields)
# Records
records = (
[datetime(day=1, month=3, year=2010), 'rec_1', 5, 6.5],
[datetime(day=2, month=3, year=2010), '', 8, 7.5],
[datetime(day=3, month=3, year=2010), 'rec_3', '', 8.5],
[datetime(day=4, month=3, year=2010), 'rec_4', 12, ''],
[datetime(day=5, month=3, year=2010), 'rec_5', -87657496599, 6.5],
[datetime(day=6, month=3, year=2010), 'rec_6', 12, -87657496599],
[datetime(day=6, month=3, year=2010), str(-87657496599), 12, 6.5])
for r in records:
s.appendRecord(list(r))
s.close()
# Read the standard file
s = FileRecordStream(streamID=filename, write=False)
fieldsRead = s.getFields()
self.assertEqual(fields, fieldsRead)
recordsRead = []
while True:
r = s.getNextRecord()
if r is None:
break
print 'Reading record ...'
print r
recordsRead.append(r)
# sort the records by date, so we know for sure which is which
sorted(recordsRead, key=lambda rec: rec[0])
# empty string
self.assertEqual(SENTINEL_VALUE_FOR_MISSING_DATA, recordsRead[1][1])
# missing int
self.assertEqual(SENTINEL_VALUE_FOR_MISSING_DATA, recordsRead[2][2])
# missing float
self.assertEqual(SENTINEL_VALUE_FOR_MISSING_DATA, recordsRead[3][3])
# sentinel value in input handled correctly for int field
self.assertNotEqual(SENTINEL_VALUE_FOR_MISSING_DATA, recordsRead[4][2])
# sentinel value in input handled correctly for float field
self.assertNotEqual(SENTINEL_VALUE_FOR_MISSING_DATA, recordsRead[5][3])
# sentinel value in input handled correctly for string field
# this should leave the string as-is, since a missing string
# is encoded not with a sentinel value but with an empty string
self.assertNotEqual(SENTINEL_VALUE_FOR_MISSING_DATA, recordsRead[6][1])
# Support running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 12,411 | Python | .py | 283 | 35.462898 | 80 | 0.617984 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,074 | utils_test.py | numenta_nupic-legacy/tests/unit/nupic/data/utils_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for nupic.data.utils."""
from datetime import datetime
from nupic.data import utils
from nupic.support.unittesthelpers.testcasebase import (TestCaseBase,
unittest)
class UtilsTest(TestCaseBase):
  """Unit tests for the nupic.data.utils parse/serialize helpers."""

  def testParseTimestamp(self):
    # Each supported textual format must parse to the corresponding instant.
    cases = (
      ('2011-09-08T05:30:32.920000Z', datetime(2011, 9, 8, 5, 30, 32, 920000)),
      ('2011-09-08T05:30:32Z', datetime(2011, 9, 8, 5, 30, 32, 0)),
      ('2011-09-08T05:30:32', datetime(2011, 9, 8, 5, 30, 32, 0)),
      ('2011-09-08 05:30:32:920000', datetime(2011, 9, 8, 5, 30, 32, 920000)),
      ('2011-09-08 05:30:32.920000', datetime(2011, 9, 8, 5, 30, 32, 920000)),
      ('2011-09-08 5:30:32:92', datetime(2011, 9, 8, 5, 30, 32, 920000)),
      ('2011-09-08 5:30:32', datetime(2011, 9, 8, 5, 30, 32)),
      ('2011-09-08 5:30', datetime(2011, 9, 8, 5, 30)),
      ('2011-09-08', datetime(2011, 9, 8)))
    for text, expected in cases:
      self.assertEqual(utils.parseTimestamp(text), expected)

  def testSerializeTimestamp(self):
    stamp = datetime(2011, 9, 8, 5, 30, 32, 920000)
    self.assertEqual(utils.serializeTimestamp(stamp),
                     '2011-09-08 05:30:32.920000')

  def testSerializeTimestampNoMS(self):
    # Microseconds are dropped by the no-MS variant.
    stamp = datetime(2011, 9, 8, 5, 30, 32, 920000)
    self.assertEqual(utils.serializeTimestampNoMS(stamp),
                     '2011-09-08 05:30:32')

  def testParseSdr(self):
    self.assertSequenceEqual(utils.parseSdr("000101000"),
                             [0, 0, 0, 1, 0, 1, 0, 0, 0])

  def testSerializeSdr(self):
    self.assertSequenceEqual(utils.serializeSdr([0, 0, 0, 1, 0, 1, 0, 0, 0]),
                             "000101000")

  def testParseStringList(self):
    # Space-separated integer strings, including the empty string.
    for text, expected in zip(["", "0", "0 1"], [[], [0], [0, 1]]):
      self.assertSequenceEqual(expected, utils.parseStringList(text))

  def testStripList(self):
    # Inverse of parseStringList.
    for values, expected in zip([[], [0], [0, 1]], ["", "0", "0 1"]):
      self.assertSequenceEqual(expected, utils.stripList(values))
# Support running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 3,120 | Python | .py | 64 | 43.71875 | 90 | 0.63741 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,075 | aggregator_test.py | numenta_nupic-legacy/tests/unit/nupic/data/aggregator_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for aggregator module."""
import unittest2 as unittest
from nupic.data import aggregator
class AggregatorTest(unittest.TestCase):
  """Unit tests for misc. aggregator functions."""

  def testFixAggregationDict(self):
    """_aggr_weighted_mean over a table of (values, weights, expected)."""
    cases = (
      # Simplest case.
      ((1.0, 1.0), (1, 1), 1.0),
      # Simple non-uniform case.
      ((1.0, 2.0), (1, 2), 5.0 / 3.0),
      # Integer values are handled as integers.
      ((1, 2), (1, 2), 1),
      # More-than-two case.
      ((1.0, 2.0, 3.0), (1, 2, 3), 14.0 / 6.0),
      # Zeros are handled.
      ((1.0, 0.0, 3.0), (1, 2, 3), 10.0 / 6.0),
      # Negative numbers are handled.
      ((1.0, -2.0, 3.0), (1, 2, 3), 1.0),
    )
    for values, weights, expected in cases:
      result = aggregator._aggr_weighted_mean(values, weights)
      self.assertAlmostEqual(result, expected, places=7)
# Support running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 2,161 | Python | .py | 46 | 44.108696 | 72 | 0.672046 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,076 | sequence_machine_test.py | numenta_nupic-legacy/tests/unit/nupic/data/generators/sequence_machine_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.data.generators.pattern_machine import (
PatternMachine, ConsecutivePatternMachine)
from nupic.data.generators.sequence_machine import SequenceMachine
class SequenceMachineTest(unittest.TestCase):
  """Tests for SequenceMachine sequence generation helpers."""

  def setUp(self):
    # Small consecutive-pattern universe shared by most tests.
    self.patternMachine = ConsecutivePatternMachine(100, 5)
    self.sequenceMachine = SequenceMachine(self.patternMachine)

  def testGenerateFromNumbers(self):
    numbers = list(range(0, 10)) + [None] + list(range(10, 19))
    sequence = self.sequenceMachine.generateFromNumbers(numbers)
    self.assertEqual(len(sequence), 20)
    # Numbers map through the pattern machine; None passes through verbatim.
    self.assertEqual(sequence[0], self.patternMachine.get(0))
    self.assertEqual(sequence[10], None)
    self.assertEqual(sequence[11], self.patternMachine.get(10))

  def testAddSpatialNoise(self):
    bigPatternMachine = PatternMachine(10000, 1000, num=100)
    bigSequenceMachine = SequenceMachine(bigPatternMachine)
    numbers = list(range(0, 100))
    numbers.append(None)

    # 50% noise: overlap with the clean pattern should be roughly half.
    sequence = bigSequenceMachine.generateFromNumbers(numbers)
    noisy = bigSequenceMachine.addSpatialNoise(sequence, 0.5)
    overlap = len(noisy[0] & bigPatternMachine.get(0))
    self.assertTrue(400 < overlap < 600)

    # 0% noise: the pattern comes back untouched.
    sequence = bigSequenceMachine.generateFromNumbers(numbers)
    noisy = bigSequenceMachine.addSpatialNoise(sequence, 0.0)
    overlap = len(noisy[0] & bigPatternMachine.get(0))
    self.assertEqual(overlap, 1000)

  def testGenerateNumbers(self):
    numbers = self.sequenceMachine.generateNumbers(1, 100)
    self.assertEqual(numbers[-1], None)
    self.assertEqual(len(numbers), 101)
    # A shuffle of 0..99 terminated by None -- shuffled, but a permutation.
    self.assertFalse(numbers[:-1] == list(range(0, 100)))
    self.assertEqual(sorted(numbers[:-1]), list(range(0, 100)))

  def testGenerateNumbersMultipleSequences(self):
    numbers = self.sequenceMachine.generateNumbers(3, 100)
    self.assertEqual(len(numbers), 303)
    # Each 100-number run (separated by None) draws from its own range.
    self.assertEqual(sorted(numbers[0:100]), list(range(0, 100)))
    self.assertEqual(sorted(numbers[101:201]), list(range(100, 200)))
    self.assertEqual(sorted(numbers[202:302]), list(range(200, 300)))

  def testGenerateNumbersWithShared(self):
    numbers = self.sequenceMachine.generateNumbers(3, 100, (20, 35))
    self.assertEqual(len(numbers), 303)
    # Positions 20-34 of every sequence hold the same shared subsequence.
    shared = list(range(300, 315))
    for offset in (0, 101, 202):
      self.assertEqual(numbers[20 + offset:35 + offset], shared)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 3,346 | Python | .py | 69 | 44.782609 | 72 | 0.726322 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,077 | anomalyzer_test.py | numenta_nupic-legacy/tests/unit/nupic/data/generators/anomalyzer_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the anomalyzer."""
import csv
from mock import MagicMock, patch
from StringIO import StringIO
import unittest2 as unittest
from nupic.data.file_record_stream import FileRecordStream
from nupic.data.generators import anomalyzer
class AnomalyzerTest(unittest.TestCase):
  """Tests for the anomalyzer.

  Each test runs one anomalyzer command (add/scale/copy/sample) over a
  small in-memory CSV stream and checks the rows it writes out.  The
  duplicated mock/run/normalize boilerplate is factored into
  _runAnomalyzer; test behavior is unchanged.
  """

  def setUp(self):
    self.sampleInput = ("Timestamp,Value\n"
                        "datetime,int\n"
                        "T,\n"
                        "2011-09-04 2:00:00.0,1\n"
                        "2011-09-04 2:05:00.0,2\n"
                        "2011-09-04 2:10:00.0,3\n"
                        "2011-09-04 2:15:00.0,4\n"
                        "2011-09-04 2:20:00.0,5\n"
                        "2011-09-04 2:25:00.0,6")

  def _runAnomalyzer(self, operation, *args):
    """Run operation(inputFile, outputFile, *args) over self.sampleInput.

    `open` is patched so FileRecordStream reads the sample input and
    writes to an in-memory buffer.  Returns the written text with all
    line endings normalized to LF.
    """
    mockInput = MagicMock(return_value=StringIO(self.sampleInput))
    output = StringIO()
    mockOutput = MagicMock(return_value=output)
    with patch("__builtin__.open", mockInput):
      inputFile = FileRecordStream("input_path")
      with patch("__builtin__.open", mockOutput):
        outputFile = FileRecordStream("output_path",
                                      fields=inputFile.getFields(),
                                      write=True)
        operation(inputFile, outputFile, *args)
    result = output.getvalue()
    result = result.replace("\r\n", "\n")
    result = result.replace("\r", "\n")
    return result

  def testAddBeginning(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,9\n"
                      "2011-09-04 02:05:00.000000,10\n"
                      "2011-09-04 02:10:00.000000,3\n"
                      "2011-09-04 02:15:00.000000,4\n"
                      "2011-09-04 02:20:00.000000,5\n"
                      "2011-09-04 02:25:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.add, 1, 0, 1, 8)
    self.assertSequenceEqual(expectedOutput, result)

  def testAddMiddle(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,10\n"
                      "2011-09-04 02:10:00.000000,11\n"
                      "2011-09-04 02:15:00.000000,4\n"
                      "2011-09-04 02:20:00.000000,5\n"
                      "2011-09-04 02:25:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.add, 1, 1, 2, 8)
    self.assertSequenceEqual(expectedOutput, result)

  def testAddEnd(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,2\n"
                      "2011-09-04 02:10:00.000000,3\n"
                      "2011-09-04 02:15:00.000000,4\n"
                      "2011-09-04 02:20:00.000000,13\n"
                      "2011-09-04 02:25:00.000000,14\n")
    result = self._runAnomalyzer(anomalyzer.add, 1, 4, 5, 8)
    self.assertSequenceEqual(expectedOutput, result)

  def testAddSingle(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,10\n"
                      "2011-09-04 02:10:00.000000,3\n"
                      "2011-09-04 02:15:00.000000,4\n"
                      "2011-09-04 02:20:00.000000,5\n"
                      "2011-09-04 02:25:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.add, 1, 1, 1, 8)
    self.assertSequenceEqual(expectedOutput, result)

  def testAddAll(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,9\n"
                      "2011-09-04 02:05:00.000000,10\n"
                      "2011-09-04 02:10:00.000000,11\n"
                      "2011-09-04 02:15:00.000000,12\n"
                      "2011-09-04 02:20:00.000000,13\n"
                      "2011-09-04 02:25:00.000000,14\n")
    result = self._runAnomalyzer(anomalyzer.add, 1, 0, 5, 8)
    self.assertSequenceEqual(expectedOutput, result)

  def testScale(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,16\n"
                      "2011-09-04 02:10:00.000000,24\n"
                      "2011-09-04 02:15:00.000000,4\n"
                      "2011-09-04 02:20:00.000000,5\n"
                      "2011-09-04 02:25:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.scale, 1, 1, 2, 8)
    self.assertSequenceEqual(expectedOutput, result)

  def testCopyAllImplicit(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,2\n"
                      "2011-09-04 02:10:00.000000,3\n"
                      "2011-09-04 02:15:00.000000,4\n"
                      "2011-09-04 02:20:00.000000,5\n"
                      "2011-09-04 02:25:00.000000,6\n"
                      "2011-09-04 02:30:00.000000,1\n"
                      "2011-09-04 02:35:00.000000,2\n"
                      "2011-09-04 02:40:00.000000,3\n"
                      "2011-09-04 02:45:00.000000,4\n"
                      "2011-09-04 02:50:00.000000,5\n"
                      "2011-09-04 02:55:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.copy, 0, 5)
    self.assertSequenceEqual(expectedOutput, result)

  def testCopyAllExplicit(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,2\n"
                      "2011-09-04 02:10:00.000000,3\n"
                      "2011-09-04 02:15:00.000000,4\n"
                      "2011-09-04 02:20:00.000000,5\n"
                      "2011-09-04 02:25:00.000000,6\n"
                      "2011-09-04 02:30:00.000000,1\n"
                      "2011-09-04 02:35:00.000000,2\n"
                      "2011-09-04 02:40:00.000000,3\n"
                      "2011-09-04 02:45:00.000000,4\n"
                      "2011-09-04 02:50:00.000000,5\n"
                      "2011-09-04 02:55:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.copy, 0, 5, 6)
    self.assertSequenceEqual(expectedOutput, result)

  def testCopyBeginning(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,2\n"
                      "2011-09-04 02:10:00.000000,1\n"
                      "2011-09-04 02:15:00.000000,2\n"
                      "2011-09-04 02:20:00.000000,3\n"
                      "2011-09-04 02:25:00.000000,4\n"
                      "2011-09-04 02:30:00.000000,5\n"
                      "2011-09-04 02:35:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.copy, 0, 1, 0)
    self.assertSequenceEqual(expectedOutput, result)

  def testCopyOneRow(self):
    expectedOutput = ("Timestamp,Value\n"
                      "datetime,int\n"
                      "T,\n"
                      "2011-09-04 02:00:00.000000,1\n"
                      "2011-09-04 02:05:00.000000,2\n"
                      "2011-09-04 02:10:00.000000,2\n"
                      "2011-09-04 02:15:00.000000,3\n"
                      "2011-09-04 02:20:00.000000,4\n"
                      "2011-09-04 02:25:00.000000,5\n"
                      "2011-09-04 02:30:00.000000,6\n")
    result = self._runAnomalyzer(anomalyzer.copy, 1, 1, 1)
    self.assertSequenceEqual(expectedOutput, result)

  def testSample(self):
    result = StringIO(self._runAnomalyzer(anomalyzer.sample, 1))
    # Skip the three CSV header rows.
    result.next()
    result.next()
    result.next()
    reader = csv.reader(result)
    _, value = reader.next()
    # Exactly one data row remains, drawn from the original values.
    self.assertIn(int(value), (1, 2, 3, 4, 5, 6))
    self.assertRaises(StopIteration, result.next)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 14,916 | Python | .py | 315 | 33.974603 | 72 | 0.542559 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,078 | pattern_machine_test.py | numenta_nupic-legacy/tests/unit/nupic/data/generators/pattern_machine_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.data.generators.pattern_machine import (PatternMachine,
ConsecutivePatternMachine)
class PatternMachineTest(unittest.TestCase):
  """Tests for the random PatternMachine."""

  def setUp(self):
    # 10000 bits, 5 on-bits per pattern, 50 distinct patterns.
    self.patternMachine = PatternMachine(10000, 5, num=50)

  def testGet(self):
    """Distinct numbers map to 5-bit patterns that do not overlap."""
    first = self.patternMachine.get(48)
    second = self.patternMachine.get(49)
    self.assertEqual(len(first), 5)
    self.assertEqual(len(second), 5)
    self.assertEqual(first & second, set())

  def testGetOutOfBounds(self):
    """Requesting a number beyond `num` raises IndexError."""
    self.assertRaises(IndexError, self.patternMachine.get, 50)

  def testAddNoise(self):
    """The noise level controls how many on-bits survive."""
    machine = PatternMachine(10000, 1000, num=1)
    pattern = machine.get(0)

    # No noise: the pattern is untouched.
    self.assertEqual(len(pattern & machine.addNoise(pattern, 0.0)), 1000)

    # Half noise: roughly half the bits survive.
    survivors = len(pattern & machine.addNoise(pattern, 0.5))
    self.assertTrue(400 < survivors < 600)

    # Full noise: only chance overlap remains.
    survivors = len(pattern & machine.addNoise(pattern, 1.0))
    self.assertTrue(50 < survivors < 150)

  def testNumbersForBit(self):
    """Every bit of a pattern maps back to that pattern's number."""
    pattern = self.patternMachine.get(49)
    for bit in pattern:
      self.assertEqual(self.patternMachine.numbersForBit(bit), set([49]))

  def testNumbersForBitOutOfBounds(self):
    self.assertRaises(IndexError, self.patternMachine.numbersForBit, 10000)

  def testNumberMapForBits(self):
    """numberMapForBits groups a pattern's bits under its number."""
    pattern = self.patternMachine.get(49)
    numberMap = self.patternMachine.numberMapForBits(pattern)
    self.assertEqual(numberMap.keys(), [49])
    self.assertEqual(numberMap[49], pattern)

  def testWList(self):
    """With a list `w`, every pattern width is drawn from it and each
    width occurs at least once across the generated patterns."""
    allowedWidths = [4, 7, 11]
    machine = PatternMachine(100, allowedWidths, num=50)
    counts = dict((width, 0) for width in allowedWidths)
    for number in range(50):
      width = len(machine.get(number))
      self.assertIn(width, allowedWidths)
      counts[width] += 1
    for width in allowedWidths:
      self.assertGreater(counts[width], 0)
class ConsecutivePatternMachineTest(unittest.TestCase):
  """Tests for ConsecutivePatternMachine."""

  def setUp(self):
    # 100 bits / 5 bits per pattern => 20 patterns (numbers 0-19).
    self.patternMachine = ConsecutivePatternMachine(100, 5)

  def testGet(self):
    """Pattern n occupies the consecutive bit range [5n, 5n + 5)."""
    for number, expectedBits in ((18, set([90, 91, 92, 93, 94])),
                                 (19, set([95, 96, 97, 98, 99]))):
      pattern = self.patternMachine.get(number)
      self.assertEqual(len(pattern), 5)
      self.assertEqual(pattern, expectedBits)

  def testGetOutOfBounds(self):
    """Requesting a number past the last pattern raises IndexError."""
    self.assertRaises(IndexError, self.patternMachine.get, 20)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 3,539 | Python | .py | 82 | 38.45122 | 77 | 0.695258 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,079 | __init__.py | numenta_nupic-legacy/tests/unit/nupic/data/generators/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 979 | Python | .py | 20 | 47.95 | 73 | 0.668405 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,080 | knn_classifier_region_test.py | numenta_nupic-legacy/tests/unit/nupic/regions/knn_classifier_region_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the KNNClassifier region."""
import tempfile
import unittest
import numpy as np
from nupic.regions.knn_classifier_region import KNNClassifierRegion
# pycapnp is optional; serialization tests below are skipped when missing.
try:
  import capnp
except ImportError:
  capnp = None
# Only import the generated capnp schema when pycapnp is available.
if capnp:
  from nupic.regions.knn_classifier_region_capnp import KNNClassifierRegionProto
class KNNClassifierRegionTest(unittest.TestCase):
  """KNNClassifierRegion unit tests."""

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    """Train a classifier, round-trip it through capnp serialization, and
    verify the deserialized copy produces the same inference outputs."""
    knn = KNNClassifierRegion(distanceMethod="norm", SVDDimCount=2,
                              SVDSampleCount=2, useSparseMemory=True,
                              minSparsity=0.1, distThreshold=0.1)

    # Four sparse binary input vectors, one per training sample.
    a = np.zeros(40)
    a[[1, 3, 7, 11, 13, 17, 19, 23, 29]] = 1
    b = np.zeros(40)
    b[[2, 4, 8, 12, 14, 18, 20, 28, 30]] = 1
    c = np.zeros(40)
    c[[1, 2, 3, 14, 16, 19, 22, 24, 33]] = 1
    d = np.zeros(40)
    d[[2, 4, 8, 12, 14, 19, 22, 24, 33]] = 1

    knn.setParameter('learningMode', None, True)

    outputs = {
      "categoriesOut": np.zeros((1,)),
      "bestPrototypeIndices": np.zeros((1,)),
      "categoryProbabilitiesOut": np.zeros((1,))
    }

    input_a = {
      'categoryIn': [0],
      'bottomUpIn': a
    }
    knn.compute(input_a, outputs)
    input_b = {
      'categoryIn': [1],
      'bottomUpIn': b
    }
    knn.compute(input_b, outputs)
    input_c = {
      'categoryIn': [2],
      'bottomUpIn': c,
      'partitionIn': [211]
    }
    knn.compute(input_c, outputs)
    input_d = {
      'categoryIn': [1],
      'bottomUpIn': d,
      'partitionIn': [405]
    }
    knn.compute(input_d, outputs)

    # Switch from learning to inference before serializing.
    knn.setParameter('learningMode', None, False)
    knn.setParameter('inferenceMode', None, True)

    proto = KNNClassifierRegionProto.new_message()
    knn.writeToProto(proto)

    with tempfile.TemporaryFile() as f:
      proto.write(f)
      f.seek(0)
      protoDeserialized = KNNClassifierRegionProto.read(f)
      knnDeserialized = KNNClassifierRegion.readFromProto(protoDeserialized)

    expected = {
      "categoriesOut": np.zeros((1,)),
      "bestPrototypeIndices": np.zeros((1,)),
      "categoryProbabilitiesOut": np.zeros((1,))
    }
    actual = {
      "categoriesOut": np.zeros((1,)),
      "bestPrototypeIndices": np.zeros((1,)),
      "categoryProbabilitiesOut": np.zeros((1,))
    }

    # NOTE(review): assertItemsEqual on two dicts compares their key sets,
    # not the numpy output values — consider comparing per-key with
    # np.array_equal to make this a stronger check.
    knn.compute(input_a, expected)
    knnDeserialized.compute(input_a, actual)
    self.assertItemsEqual(actual, expected)

    # Bug fix: previously the deserialized region recomputed input_a here,
    # so the two sides were compared on different inputs.
    knn.compute(input_d, expected)
    knnDeserialized.compute(input_d, actual)
    self.assertItemsEqual(actual, expected)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 3,659 | Python | .py | 102 | 31 | 80 | 0.650057 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,081 | sdr_classifier_region_test.py | numenta_nupic-legacy/tests/unit/nupic/regions/sdr_classifier_region_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the SDRClassifier region."""
import os
import unittest2 as unittest
from nupic.engine import Network
from nupic.encoders import MultiEncoder
from nupic.data.file_record_stream import FileRecordStream
def _createNetwork():
  """Create a network with a RecordSensor region and a SDRClassifier region"""
  network = Network()
  network.addRegion('sensor', 'py.RecordSensor', '{}')
  network.addRegion('classifier', 'py.SDRClassifierRegion', '{}')
  _createSensorToClassifierLinks(network, 'sensor', 'classifier')

  sensorRegion = network.regions['sensor'].getSelf()

  # Attach a RandomDistributedScalarEncoder for the consumption field.
  consumptionEncoder = MultiEncoder()
  consumptionEncoder.addMultipleEncoders(
      {'consumption': {'fieldname': 'consumption',
                       'resolution': 0.88,
                       'seed': 1,
                       'name': 'consumption',
                       'type': 'RandomDistributedScalarEncoder'}})
  sensorRegion.encoder = consumptionEncoder

  # Stream records from the bundled gym CSV fixture next to this test.
  fixtureDir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'fixtures')
  sensorRegion.dataSource = FileRecordStream(
      streamID=os.path.join(fixtureDir, 'gymdata-test.csv'))

  # Tell the sensor which field the classifier should predict.
  network.regions['sensor'].setParameter('predictedField', 'consumption')
  return network
def _createSensorToClassifierLinks(network, sensorRegionName,
                                   classifierRegionName):
  """Create links from sensor region to classifier region."""
  # One UniformLink per (sensor output, classifier input) pair.
  for srcOutput, destInput in (('bucketIdxOut', 'bucketIdxIn'),
                               ('actValueOut', 'actValueIn'),
                               ('categoryOut', 'categoryIn'),
                               ('dataOut', 'bottomUpIn')):
    network.link(sensorRegionName, classifierRegionName, 'UniformLink', '',
                 srcOutput=srcOutput, destInput=destInput)
class SDRClassifierRegionTest(unittest.TestCase):
  """SDRClassifier region unit tests."""

  def setUp(self):
    self.network = _createNetwork()
    self.classifierRegion = self.network.regions['classifier']

  def testActValueIn(self):
    """The first record's actual value reaches the classifier input."""
    self.network.run(1)
    actualValue = self.classifierRegion.getInputData('actValueIn')[0]
    # Compare to one decimal place only.
    self.assertEqual(round(actualValue, 1), 21.2)

  def testBucketIdxIn(self):
    """The first record's bucket index reaches the classifier input."""
    self.network.run(1)
    bucketIndex = self.classifierRegion.getInputData('bucketIdxIn')[0]
    self.assertEqual(bucketIndex, 500)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 3,799 | Python | .py | 76 | 44.368421 | 78 | 0.693535 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,082 | tm_region_test.py | numenta_nupic-legacy/tests/unit/nupic/regions/tm_region_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""TMRegion unit tests."""
import tempfile
import unittest
# pycapnp is optional; serialization tests below are skipped when missing.
try:
  import capnp
except ImportError:
  capnp = None
import numpy as np
from nupic.regions.tm_region import TMRegion
# Only import the generated capnp schema when pycapnp is available.
if capnp:
  from nupic.regions.tm_region_capnp import TMRegionProto
class TMRegionTest(unittest.TestCase):
  """Capnp serialization round-trip tests for TMRegion.

  Each test trains a region on one input, serializes it, deserializes a
  copy, feeds both the same second input, and verifies all outputs match.
  The duplicated output-dict literals and copy-pasted assertions are
  deduplicated via _OUTPUT_SIZES; behavior and check order are unchanged.
  """

  # Output buffer names and sizes for a TMRegion(10, 10, 4):
  # 10 columns x 4 cells per column = 40 cells.
  _OUTPUT_SIZES = (
      ("bottomUpOut", 40),
      ("topDownOut", 10),
      ("activeCells", 40),
      ("predictedActiveCells", 40),
      ("anomalyScore", 1),
      ("lrnActiveStateT", 40),
  )

  @classmethod
  def _freshOutputs(cls):
    """Return a new dict of zeroed output buffers."""
    return dict((name, np.zeros((size,))) for name, size in cls._OUTPUT_SIZES)

  def checkTMRegionImpl(self, impl):
    """Round-trip a TMRegion using temporal implementation `impl`."""
    output1 = self._freshOutputs()
    output2 = self._freshOutputs()

    # Two disjoint sparse binary inputs.
    a = np.zeros(10, dtype="int32")
    a[[1, 3, 7]] = 1
    b = np.zeros(10, dtype="int32")
    b[[2, 4, 8]] = 1

    inputA = {
      "bottomUpIn": a,
      "resetIn": np.zeros(1),
      "sequenceIdIn": np.zeros(1),
    }
    inputB = {
      "bottomUpIn": b,
      "resetIn": np.zeros(1),
      "sequenceIdIn": np.zeros(1),
    }

    region1 = TMRegion(10, 10, 4, temporalImp=impl)
    region1.initialize()
    region1.compute(inputA, output1)

    # Serialize, then deserialize an independent copy.
    proto1 = TMRegionProto.new_message()
    region1.writeToProto(proto1)
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = TMRegionProto.read(f)
      region2 = TMRegion.readFromProto(proto2)

    # Both regions must now behave identically on the next input.
    region1.compute(inputB, output1)
    region2.compute(inputB, output2)

    for name, _ in self._OUTPUT_SIZES:
      self.assertTrue(np.array_equal(output1[name], output2[name]),
                      "output %r differs after deserialization" % name)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteReadPy(self):
    self.checkTMRegionImpl("py")

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteReadCpp(self):
    self.checkTMRegionImpl("cpp")

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteReadTMPy(self):
    self.checkTMRegionImpl("tm_py")

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteReadTMCpp(self):
    self.checkTMRegionImpl("tm_cpp")
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 4,021 | Python | .py | 105 | 32.266667 | 72 | 0.642931 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,083 | regions_spec_test.py | numenta_nupic-legacy/tests/unit/nupic/regions/regions_spec_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest2 as unittest
from nupic.regions.spec import (Spec,
InputSpec,
OutputSpec,
ParameterSpec,
CommandSpec)
class KNNAnomalyClassifierRegionTest(unittest.TestCase):
def testInvalidInputSpec(self):
with self.assertRaises(Exception):
_x = InputSpec()
with self.assertRaises(Exception):
_x = InputSpec(dataType="int", count=-4)
with self.assertRaises(Exception):
_x = InputSpec(description=555, dataType="int", count=4)
def testValidInputSpec(self):
try:
x = InputSpec(dataType="int", count=4)
x.invariant()
x = InputSpec(description="description",
dataType="int",
count=3,
required=True,
regionLevel=True,
isDefaultInput=True,
requireSplitterMap=True)
x.invariant()
except:
self.fail("Got unexpected exception")
def testInvalidOutputSpec(self):
with self.assertRaises(Exception):
_x = OutputSpec()
with self.assertRaises(Exception):
_x = OutputSpec(dataType="int", count=4, isDefaultOutput="Sure")
with self.assertRaises(Exception):
_x = OutputSpec(description=555, dataType="int", count=4)
def testValidOutputSpec(self):
try:
x = OutputSpec(dataType="int", count=4)
x.invariant()
x = OutputSpec(description="description",
dataType="int",
count=3,
regionLevel=True,
isDefaultOutput=True)
x.invariant()
except:
self.fail("Got unexpected exception")
def testInvalidParameterSpec(self):
with self.assertRaises(Exception):
_x = ParameterSpec()
with self.assertRaises(Exception):
_x = ParameterSpec(dataType="int", count=4, defaultValue="not an int")
with self.assertRaises(Exception):
_x = ParameterSpec(description=555, dataType="int")
with self.assertRaises(Exception):
_x = ParameterSpec(dataType="int",
accessMode="no such mode")
with self.assertRaises(Exception):
_x = ParameterSpec(dataType="int",
defaultValue=5,
accessMode="Read")
def testValidParameterSpec(self):
try:
x = ParameterSpec(dataType="int", accessMode="Read")
x.invariant()
x = ParameterSpec(description="description",
dataType="int",
count=3,
defaultValue=-6,
accessMode="Create")
x.invariant()
except:
self.fail("Got unexpected exception")
@unittest.skip("(#616) Disabled for now,"
"to add error checking in commandSpec later.")
def testInvalidCommandSpec(self):
with self.assertRaises(Exception):
_x = CommandSpec()
with self.assertRaises(Exception):
_x = CommandSpec(description=None)
with self.assertRaises(Exception):
_x = CommandSpec(description=3)
def testValidCommandSpec(self):
try:
x = CommandSpec("")
x.invariant()
x = CommandSpec(description="")
x.invariant()
x = CommandSpec(description="this is a command")
x.invariant()
except:
self.fail("Got unexpected exception")
@unittest.skip("(#617) Disabled for now,"
"to add error checking in Spec initializer later.")
def testInvalidSpec(self):
with self.assertRaises(Exception):
_x = Spec()
with self.assertRaises(Exception):
_x = Spec(description=3)
with self.assertRaises(Exception):
_x = Spec(description="123", singleNodeOnly=3)
def testValidSpec(self):
try:
x = Spec(description="123", singleNodeOnly=True)
x.invariant()
x = Spec(description="123", singleNodeOnly=True)
x.commands = dict(command1=CommandSpec("A command"),
command2=CommandSpec("Another command"))
x.invariant()
except:
self.fail("Got unexpected exception")
def testSpec_toDict(self):
x = Spec(description="123", singleNodeOnly=True)
d = x.toDict()
self.assertEqual(d["description"], "123")
self.assertTrue(d["singleNodeOnly"])
self.assertTrue(d["inputs"] == d["outputs"]
== d["parameters"] == d["commands"] == {})
x.inputs = dict(i1=InputSpec(dataType="int"),
i2=InputSpec(dataType="str", isDefaultInput=True))
x.outputs = dict(o=OutputSpec(dataType="float", count=8))
x.parameters = dict(p=ParameterSpec(description="param",
dataType="float",
defaultValue=3.14,
accessMode="Create"))
d = x.toDict()
inputs = d["inputs"]
self.assertEqual(len(inputs), 2)
i1 = inputs["i1"]
self.assertEqual(i1["count"], 1)
self.assertFalse(i1["isDefaultInput"])
self.assertEqual(i1["description"], "")
self.assertEqual(i1["dataType"], "int")
self.assertFalse(i1["required"])
self.assertTrue(i1["requireSplitterMap"])
self.assertFalse(i1["regionLevel"])
i2 = inputs["i2"]
self.assertEqual(i2["count"], 1)
self.assertTrue(i2["isDefaultInput"])
self.assertEqual(i2["description"], "")
self.assertEqual(i2["dataType"], "str")
self.assertFalse(i2["required"])
self.assertTrue(i2["requireSplitterMap"])
self.assertFalse(i2["regionLevel"])
outputs = d["outputs"]
self.assertEqual(len(outputs), 1)
o = outputs["o"]
self.assertEqual(o["count"], 8)
self.assertFalse(o["isDefaultOutput"])
self.assertEqual(o["description"], "")
self.assertEqual(o["dataType"], "float")
self.assertFalse(o["regionLevel"])
parameters = d["parameters"]
self.assertEqual(len(parameters), 1)
p = parameters["p"]
self.assertEqual(p["description"], "param")
self.assertEqual(p["dataType"], "float")
self.assertEqual(p["accessMode"], "Create")
self.assertEqual(p["defaultValue"], 3.14)
self.assertEqual(p["count"], 1)
self.assertEqual(p["constraints"], "")
self.assertEqual(d["commands"], {})
# Support running this test module directly.
if __name__ == "__main__":
  unittest.main()
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import tempfile
import unittest
import random
import csv
import numpy
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.algorithms.anomaly_likelihood_capnp import\
AnomalyLikelihoodProto as AnomalyLikelihoodRegionProto
from nupic.regions.anomaly_likelihood_region import AnomalyLikelihoodRegion
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood
from pkg_resources import resource_filename
# Path of the hotgym anomaly fixture bundled in the nupic.datafiles package.
_INPUT_DATA_FILE = resource_filename(
  "nupic.datafiles", "extra/hotgym/hotgym-anomaly.csv"
)
# NOTE(review): the string below sits after the imports, so it is a no-op
# expression rather than the module docstring — presumably intended as one.
""" Unit tests for the anomaly likelihood region """
class AnomalyLikelihoodRegionTest(unittest.TestCase):
  """Tests for anomaly likelihood region"""

  def testParamterError(self):
    """ ensure historicWindowSize is greater than estimationSamples """
    # Use assertRaises instead of the try/except + forced
    # assertEqual(False, True) failure pattern.
    with self.assertRaises(ValueError):
      AnomalyLikelihoodRegion(estimationSamples=100,
                              historicWindowSize=99)

  def testLikelihoodValues(self):
    """ test to see if the region keeps track of state correctly and produces
        the same likelihoods as the AnomalyLikelihood module """
    anomalyLikelihoodRegion = AnomalyLikelihoodRegion()
    anomalyLikelihood = AnomalyLikelihood()

    inputs = AnomalyLikelihoodRegion.getSpec()['inputs']
    outputs = AnomalyLikelihoodRegion.getSpec()['outputs']
    with open(_INPUT_DATA_FILE) as f:
      reader = csv.reader(f)
      reader.next()  # skip the CSV header row
      for record in reader:
        consumption = float(record[1])
        anomalyScore = float(record[2])
        # Reference value computed directly by the algorithm class.
        likelihood1 = anomalyLikelihood.anomalyProbability(
          consumption, anomalyScore)

        # The same inputs fed through the region wrapper must match exactly.
        inputs['rawAnomalyScore'] = numpy.array([anomalyScore])
        inputs['metricValue'] = numpy.array([consumption])
        anomalyLikelihoodRegion.compute(inputs, outputs)
        likelihood2 = outputs['anomalyLikelihood'][0]
        self.assertEqual(likelihood1, likelihood2)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testSerialization(self):
    """ test to ensure serialization preserves the state of the region
        correctly. """
    anomalyLikelihoodRegion1 = AnomalyLikelihoodRegion()
    inputs = AnomalyLikelihoodRegion.getSpec()['inputs']
    outputs = AnomalyLikelihoodRegion.getSpec()['outputs']
    parameters = AnomalyLikelihoodRegion.getSpec()['parameters']

    # Make sure to calculate distribution by passing the probation period
    learningPeriod = parameters['learningPeriod']['defaultValue']
    reestimationPeriod = parameters['reestimationPeriod']['defaultValue']
    probation = learningPeriod + reestimationPeriod
    for _ in xrange(0, probation + 1):
      inputs['rawAnomalyScore'] = numpy.array([random.random()])
      inputs['metricValue'] = numpy.array([random.random()])
      anomalyLikelihoodRegion1.compute(inputs, outputs)
    # (dead "score1 = outputs[...]" assignment removed here: the value was
    # never read before being reassigned in the comparison loop below)

    proto1 = AnomalyLikelihoodRegionProto.new_message()
    anomalyLikelihoodRegion1.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = AnomalyLikelihoodRegionProto.read(f)

    # Load the deserialized proto
    anomalyLikelihoodRegion2 = AnomalyLikelihoodRegion.read(proto2)
    self.assertEqual(anomalyLikelihoodRegion1, anomalyLikelihoodRegion2)

    # Both regions must stay in lock-step on further identical input.
    window = parameters['historicWindowSize']['defaultValue']
    for _ in xrange(0, window + 1):
      inputs['rawAnomalyScore'] = numpy.array([random.random()])
      inputs['metricValue'] = numpy.array([random.random()])
      anomalyLikelihoodRegion1.compute(inputs, outputs)
      score1 = outputs['anomalyLikelihood'][0]
      anomalyLikelihoodRegion2.compute(inputs, outputs)
      score2 = outputs['anomalyLikelihood'][0]
      self.assertEqual(score1, score2)
# Support running this test module directly.
if __name__ == "__main__":
  unittest.main()
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import tempfile
import unittest
import numpy
from nupic.regions.anomaly_region import AnomalyRegion
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.AnomalyRegion_capnp import AnomalyRegionProto
class AnomalyRegionTest(unittest.TestCase):
  """Tests for anomaly region"""
  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    """Round-trip the region through capnp serialization and verify that the
    restored copy produces identical anomaly scores on further input."""
    # 13-column binary SDRs: the predicted pattern is constant while the
    # active pattern varies per timestep.
    predictedColumns = [[0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]]
    activeColumns = [[0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                     [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
                     [0, 1 ,0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
                     [0, 0 ,0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
                     [1, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                     [0, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                     [0, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                     [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                     [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
                     [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0]]
    # Expected raw anomaly scores; only indices 6-9 are asserted below.
    anomalyExpected = (1.0, 0.25, 1.0/3.0, 2.0/3.0, 1.0, 2.0/3.0, 1.0,
                       0.0, 0.25, 0.25)
    anomalyRegion1 = AnomalyRegion()
    inputs = AnomalyRegion.getSpec()['inputs']
    outputs = AnomalyRegion.getSpec()['outputs']
    # Feed the first six timesteps so the region accumulates state before
    # it is serialized.
    for i in xrange(0, 6):
      inputs['predictedColumns'] = numpy.array(predictedColumns[i])
      inputs['activeColumns'] = numpy.array(activeColumns[i])
      anomalyRegion1.compute(inputs, outputs)
    proto1 = AnomalyRegionProto.new_message()
    anomalyRegion1.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = AnomalyRegionProto.read(f)
    # Load the deserialized proto
    anomalyRegion2 = AnomalyRegion.read(proto2)
    self.assertEqual(anomalyRegion1, anomalyRegion2)
    # Remaining timesteps: original and restored regions must both match
    # the precomputed expected scores.
    for i in xrange(6, 10):
      inputs['predictedColumns'] = numpy.array(predictedColumns[i])
      inputs['activeColumns'] = numpy.array(activeColumns[i])
      anomalyRegion1.compute(inputs, outputs)
      score1 = outputs['rawAnomalyScore'][0]
      anomalyRegion2.compute(inputs, outputs)
      score2 = outputs['rawAnomalyScore'][0]
      self.assertAlmostEqual(
          score1, anomalyExpected[i], places=5,
          msg="Anomaly score of %f doesn't match expected of %f" % (
              score1, anomalyExpected[i]))
      self.assertAlmostEqual(
          score2, anomalyExpected[i], places=5,
          msg="Anomaly score of %f doesn't match expected of %f" % (
              score2, anomalyExpected[i]))
# Support running this test module directly.
if __name__ == "__main__":
  unittest.main()
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the RecordSensor region."""
import numpy
import os
import unittest2 as unittest
from nupic.engine import Network
from nupic.encoders import MultiEncoder
from nupic.data.file_record_stream import FileRecordStream
def _createNetwork():
  """Create network with one RecordSensor region."""
  net = Network()
  net.addRegion('sensor', 'py.RecordSensor', '{}')
  sensor = net.regions['sensor'].getSelf()

  # Attach a RandomDistributedScalarEncoder for the "consumption" field.
  consumptionEncoderSpec = {
      'fieldname': 'consumption',
      'resolution': 0.88,
      'seed': 1,
      'name': 'consumption',
      'type': 'RandomDistributedScalarEncoder',
  }
  multiEncoder = MultiEncoder()
  multiEncoder.addMultipleEncoders({'consumption': consumptionEncoderSpec})
  sensor.encoder = multiEncoder

  # Feed the sensor from the CSV fixture that sits next to this test module.
  fixturePath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'fixtures', 'gymdata-test.csv')
  sensor.dataSource = FileRecordStream(streamID=fixturePath)

  # Tell the sensor which field is being predicted.
  net.regions['sensor'].setParameter('predictedField', 'consumption')

  return net
class RecordSensorRegionTest(unittest.TestCase):
  """RecordSensor region unit tests."""

  def testVaryingNumberOfCategories(self):
    """populateCategoriesOut must pad with -1 / truncate to numCategories."""
    # Setup network with sensor; max number of categories = 2
    net = Network()
    sensorRegion = net.addRegion(
        "sensor", "py.RecordSensor", "{'numCategories': 2}")
    sensor = sensorRegion.getSelf()

    # Test for # of output categories = max
    data = {"_timestamp": None, "_category": [0, 1], "label": "0 1",
            "_sequenceId": 0, "y": 2.624902024, "x": 0.0,
            "_timestampRecordIdx": None, "_reset": 0}
    sensorOutput = numpy.array([0, 0], dtype="int32")
    sensor.populateCategoriesOut(data["_category"], sensorOutput)
    self.assertSequenceEqual([0, 1], sensorOutput.tolist(),
        "Sensor failed to populate the array with record of two categories.")

    # Test for # of output categories > max: extras are dropped
    data["_category"] = [1, 2, 3]
    sensorOutput = numpy.array([0, 0], dtype="int32")
    sensor.populateCategoriesOut(data["_category"], sensorOutput)
    self.assertSequenceEqual([1, 2], sensorOutput.tolist(),
        "Sensor failed to populate the array w/ record of three categories.")

    # Test for # of output categories < max: remainder padded with -1
    data["_category"] = [3]
    sensorOutput = numpy.array([0, 0], dtype="int32")
    sensor.populateCategoriesOut(data["_category"], sensorOutput)
    self.assertSequenceEqual([3, -1], sensorOutput.tolist(),
        "Sensor failed to populate the array w/ record of one category.")

    # Test for no output categories
    data["_category"] = [None]
    sensorOutput = numpy.array([0, 0], dtype="int32")
    sensor.populateCategoriesOut(data["_category"], sensorOutput)
    self.assertSequenceEqual([-1, -1], sensorOutput.tolist(),
        "Sensor failed to populate the array w/ record of zero categories.")

  def testBucketIdxOut(self):
    """First fixture record must map to bucket index 500."""
    network = _createNetwork()
    network.run(1)  # Process 1 row of data
    bucketIdxOut = network.regions['sensor'].getOutputData('bucketIdxOut')[0]
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(bucketIdxOut, 500)

  def testActValueOut(self):
    """First fixture record must carry actual value 21.2."""
    network = _createNetwork()
    network.run(1)  # Process 1 row of data
    actValueOut = network.regions['sensor'].getOutputData('actValueOut')[0]
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(round(actValueOut, 1), 21.2)  # only 1 precision digit
# Support running this test module directly.
if __name__ == "__main__":
  unittest.main()
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the htm_prediction_model module."""
import sys
import copy
from datetime import datetime
import unittest2 as unittest
import random
import tempfile
import numpy
from mock import Mock, patch, ANY, call
from nupic.support.unittesthelpers.testcasebase import (unittest,
TestOptionParser)
from nupic.frameworks.opf.opf_utils import InferenceType
from nupic.regions.knn_anomaly_classifier_region import (
KNNAnomalyClassifierRegion,
_CLAClassificationRecord)
from nupic.frameworks.opf.opf_utils import InferenceType
from nupic.frameworks.opf.exceptions import (HTMPredictionModelInvalidRangeError)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.knn_anomaly_classifier_region_capnp import \
KNNAnomalyClassifierRegionProto
class KNNAnomalyClassifierRegionTest(unittest.TestCase):
"""KNNAnomalyClassifierRegion unit tests."""
def setUp(self):
self.params = dict(
trainRecords=10,
anomalyThreshold=1.1,
cacheSize=10000,
k=1,
distanceMethod='rawOverlap',
distanceNorm=1,
doBinarization=1,
replaceDuplicates=0,
maxStoredPatterns=1000)
self.helper = KNNAnomalyClassifierRegion(**self.params)
def testInit(self):
params = dict(
trainRecords=100,
anomalyThreshold=101,
cacheSize=102,
classificationVectorType=1,
k=1,
distanceMethod='rawOverlap',
distanceNorm=1,
doBinarization=1,
replaceDuplicates=0,
maxStoredPatterns=1000)
helper = KNNAnomalyClassifierRegion(**params)
self.assertEqual(helper.trainRecords, params['trainRecords'])
self.assertEqual(helper.anomalyThreshold, params['anomalyThreshold'])
self.assertEqual(helper.cacheSize, params['cacheSize'])
self.assertEqual(helper.classificationVectorType,
params['classificationVectorType'])
  # patch decorators apply bottom-up, so _constructClassificationRecord is
  # the first mock argument and _classifyState the last.
  @patch.object(KNNAnomalyClassifierRegion, '_classifyState')
  @patch.object(KNNAnomalyClassifierRegion, 'getParameter')
  @patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord')
  def testCompute(self, constructRecord, getParam, classifyState):
    """compute() must classify the constructed record and expose its label."""
    # Stub getParameter so the region believes training starts at record 0.
    params = {
      'trainRecords': 0
    }
    getParam.side_effect = params.get
    state = {
      "ROWID": 0,
      "anomalyScore": 1.0,
      "anomalyVector": [1,4,5],
      "anomalyLabel": "Label"
    }
    record = _CLAClassificationRecord(**state)
    constructRecord.return_value = record
    self.helper.compute(dict(), dict())
    # The constructed record is classified exactly once and its label is
    # published via labelResults.
    classifyState.assert_called_once_with(record)
    self.assertEqual(self.helper.labelResults, state['anomalyLabel'])
  def testGetLabels(self):
    """getLabels() validates ranges and reports labels of stored records."""
    # No _recordsCache
    self.helper._recordsCache = []
    self.assertEqual(self.helper.getLabels(), \
        {'isProcessing': False, 'recordLabels': []})
    # Invalid ranges
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.getLabels, start=100, end=100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.getLabels, start=-100, end=-100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.getLabels, start=100, end=-100)
    # Valid no threshold labels
    values = {
      'categoryRecencyList': [4, 5, 7],
    }
    self.helper.saved_categories = ['TestCategory']
    categoryList = [1, 1, 1]
    classifier = self.helper._knnclassifier
    classifier.getParameter = Mock(side_effect=values.get)
    classifier._knn._categoryList = categoryList
    results = self.helper.getLabels()
    self.assertTrue('isProcessing' in results)
    self.assertTrue('recordLabels' in results)
    # One result per stored prototype ROWID, each carrying the saved category.
    self.assertEqual(len(results['recordLabels']),
                     len(values['categoryRecencyList']))
    for record in results['recordLabels']:
      self.assertTrue(record['ROWID'] in values['categoryRecencyList'])
      self.assertEqual(record['labels'], self.helper.saved_categories)
  # patch decorators apply bottom-up: _classifyState -> classifyState,
  # _constructClassificationRecord -> constructVector,
  # _getStateAnomalyVector -> getVector.
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  @patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord')
  @patch.object(KNNAnomalyClassifierRegion, '_classifyState')
  def testAddLabel(self, classifyState, constructVector, getVector):
    """addLabel() validates ranges, updates the cache, and trains the KNN."""
    # Setup Mocks
    getVector.return_value = numpy.array([0, 0, 0, 1, 0, 0, 1])
    knn = self.helper._knnclassifier._knn
    knn.learn = Mock()
    # Invalid ranges
    self.helper._recordsCache = []
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.addLabel, start=100, end=100, labelName="test")
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.addLabel, start=100, end=100, labelName="test")
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.addLabel, start=-100, end=-100, labelName="test")
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.addLabel, start=100, end=-100, labelName="test")
    # Valid no threshold labels
    self.helper._recordsCache = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=[], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    results = self.helper.addLabel(11, 12, "Added")
    # Verifies records were updated
    self.assertEqual(results, None)
    self.assertTrue('Added' in self.helper._recordsCache[1].anomalyLabel)
    self.assertTrue(self.helper._recordsCache[1].setByUser)
    # Verifies record added to KNN classifier
    knn.learn.assert_called_once_with(ANY, ANY, rowID=11)
    # Verifies records after added label is recomputed
    classifyState.assert_called_once_with(self.helper._recordsCache[2])
  @patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord')
  @patch.object(KNNAnomalyClassifierRegion, '_classifyState')
  def testRemoveLabel(self, classifyState, constructClassificationRecord):
    """removeLabels() with a label filter must drop only matching labels."""
    knn = self.helper._knnclassifier._knn
    knn._numPatterns = 3
    knn._categoryRecencyList = [10, 11, 12]
    # mockRemoveIds (defined elsewhere in this class) emulates id removal.
    knn.removeIds = Mock(side_effect = self.mockRemoveIds)
    # Empty cache: any range is invalid.
    self.helper._recordsCache = []
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.removeLabels, )
    # Invalid ranges
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.removeLabels, start=100, end=100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.removeLabels, start=-100, end=-100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(HTMPredictionModelInvalidRangeError,
                      self.helper.removeLabels, start=100, end=-100)
    # Valid no threshold labels
    self.helper._recordsCache = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    results = self.helper.removeLabels(11, 12, "Test")
    self.assertEqual(results, None)
    self.assertTrue('Test' not in self.helper._recordsCache[1].anomalyLabel)
    # Verifies records removed from KNN classifier
    self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])
    # Verifies records after removed record are updated
    classifyState.assert_called_once_with(self.helper._recordsCache[2])
  @patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord')
  @patch.object(KNNAnomalyClassifierRegion, '_classifyState')
  def testRemoveLabelNoFilter(self, classifyState,
                              constructClassificationRecord):
    """removeLabels() without a label filter must drop all labels in range."""
    knn = self.helper._knnclassifier._knn
    knn._numPatterns = 3
    knn._categoryRecencyList = [10, 11, 12]
    # mockRemoveIds (defined elsewhere in this class) emulates id removal.
    knn.removeIds = Mock(side_effect=self.mockRemoveIds)
    # Valid no threshold labels
    self.helper._recordsCache = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    results = self.helper.removeLabels(11, 12)
    self.assertEqual(results, None)
    self.assertTrue('Test' not in self.helper._recordsCache[1].anomalyLabel)
    # Verifies records removed from KNN classifier
    self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])
    # Verifies records after removed record are updated
    classifyState.assert_called_once_with(self.helper._recordsCache[2])
  @patch.object(KNNAnomalyClassifierRegion, '_classifyState')
  def testSetGetThreshold(self, classifyState):
    """Setting anomalyThreshold reclassifies every cached record."""
    self.helper._recordsCache = [Mock(), Mock(), Mock()]
    self.helper.setParameter('anomalyThreshold', None, 1.0)
    self.assertAlmostEqual(self.helper.anomalyThreshold, 1.0)
    # One _classifyState call per cached record.
    self.assertEqual(len(classifyState.mock_calls),
                     len(self.helper._recordsCache))
    self.assertAlmostEqual(self.helper.getParameter('anomalyThreshold'), 1.0)
    # Non-numeric values are rejected.
    self.assertRaises(Exception, self.helper.setParameter,
                      'anomalyThreshold', None, 'invalid')
  @patch.object(KNNAnomalyClassifierRegion, '_classifyState')
  def testSetGetWaitRecords(self, classifyState):
    """Setting trainRecords reclassifies the cache; bad values must raise."""
    self.helper._recordsCache = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    self.helper.setParameter('trainRecords', None, 20)
    self.assertEqual(self.helper.trainRecords, 20)
    # One _classifyState call per cached record.
    self.assertEqual(len(classifyState.mock_calls),
                     len(self.helper._recordsCache))
    self.assertEqual(self.helper.getParameter('trainRecords'), 20)
    # Test invalid parameter type
    self.assertRaises(Exception, self.helper.setParameter,
                      'trainRecords', None, 'invalid')
    # Test invalid value before first record ROWID in cache
    state = {
      "ROWID": 1000,
      "anomalyScore": 1.0,
      "anomalyVector": [1,4,5],
      "anomalyLabel": "Label"
    }
    record = _CLAClassificationRecord(**state)
    # NOTE(review): `record` is never used — the cache is seeded with the raw
    # `state` dict instead; presumably setParameter fails before touching the
    # cached entry. Confirm against KNNAnomalyClassifierRegion.setParameter.
    self.helper._recordsCache = [state]
    self.assertRaises(Exception, self.helper.setParameter,
                      'trainRecords', None, 0)
  @patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord')
  def testSetGetWaitRecordsRecalculate(self, getRecord):
    """
    This test ensures that records in classifier are removed when they are no
    longer being used when the trainRecords is set.
    """
    self.helper.cacheSize = 5
    self.helper.anomalyThreshold = 0.8
    self.helper._anomalyVectorLength = 20
    # One synthetic record per compute() call; anomalyScore >= threshold
    # (rows 10 and 14) or score 0 — the expected categoryRecencyList values
    # below encode which rows end up stored in the KNN.
    records = [
      Mock(ROWID=10, anomalyLabel=["Test"], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1,4])),
      Mock(ROWID=11, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,2])),
      Mock(ROWID=12, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4])),
      Mock(ROWID=13, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,2,6,7])),
      Mock(ROWID=14, anomalyLabel=["Test"], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1,10])),
      Mock(ROWID=15, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,3])),
      Mock(ROWID=16, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4])),
      Mock(ROWID=17, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([10])),
      Mock(ROWID=18, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4]))]
    getRecord.side_effect = records
    for i in records:
      self.helper.compute(dict(), dict())
    self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 6)
    self.assertEqual(
        self.helper._knnclassifier.getParameter('categoryRecencyList'),
        [10, 12, 14, 16, 17, 18],
        "Classifier incorrectly classified test records."
    )
    # Now set trainRecords and should remove the labels outside of cache
    # and relabel points.
    self.helper.setParameter('trainRecords', None, 14)
    self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 2)
    self.assertEqual(
        self.helper._knnclassifier.getParameter('categoryRecencyList'),
        [14, 17],
        "Classifier incorrectly reclassified test records after setting "
        "trainRecords")
  # patch decorators apply bottom-up: _categoryToLabelList -> toLabelList,
  # _recomputeRecordFromKNN -> recompute, _deleteRecordsFromKNN ->
  # deleteRecord, _addRecordToKNN -> addRecord.
  @patch.object(KNNAnomalyClassifierRegion, '_addRecordToKNN')
  @patch.object(KNNAnomalyClassifierRegion, '_deleteRecordsFromKNN')
  @patch.object(KNNAnomalyClassifierRegion, '_recomputeRecordFromKNN')
  @patch.object(KNNAnomalyClassifierRegion, '_categoryToLabelList')
  def testUpdateState(self, toLabelList, recompute, deleteRecord, addRecord):
    """_classifyState() label assignment across threshold/label combinations."""
    # Template record; each scenario below builds a fresh
    # _CLAClassificationRecord from it (or a deepcopy with tweaks).
    record = {
      "ROWID": 0,
      "anomalyScore": 1.0,
      "anomalyVector": "",
      "anomalyLabel": ["Label"],
      "setByUser": False
    }
    # Test record not labeled and not above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.trainRecords = 0
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel, [])
    deleteRecord.assert_called_once_with([state])
    # Test record not labeled and above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 0.5
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel, \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL])
    addRecord.assert_called_once_with(state)
    # Test record not labeled and above threshold during wait period
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.trainRecords = 10
    self.helper.anomalyThreshold = 0.5
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel, [])
    self.assertTrue(not addRecord.called)
    # Test record labeled and not above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.trainRecords = 0
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = ["Label"]
    state = _CLAClassificationRecord(**record)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel, ["Label"])
    self.assertTrue(not addRecord.called)
    # Test setByUser
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = ["Label 2"]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel,
        [recordCopy["anomalyLabel"][0], toLabelList.return_value[0]])
    addRecord.assert_called_once_with(state)
    # Test removal of above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = []
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL,
        KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
        KNNAnomalyClassifierRegion.AUTO_TAG]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel, [])
    # Auto classified threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel,
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
        KNNAnomalyClassifierRegion.AUTO_TAG])
    addRecord.assert_called_once_with(state)
    # Test precedence of threshold label above auto threshold label
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 0.8
    toLabelList.return_value = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL,
        KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
        KNNAnomalyClassifierRegion.AUTO_TAG]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._classifyState(state)
    self.assertEqual(state.anomalyLabel,
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL])
    addRecord.assert_called_once_with(state)
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  def testAddRecordToKNN(self, getAnomalyVector):
    """_addRecordToKNN() must learn a brand-new pattern when the record's
    ROWID is absent from the classifier's recency list, and must instead
    overwrite the stored prototype (without re-learning) when the ROWID is
    already present."""
    getAnomalyVector.return_value = numpy.array([0, 1, 0, 0, 1, 0, 1, 1])
    values = {
      'categoryRecencyList': [1, 2, 3]  # ROWID 5 not present yet
    }
    classifier = self.helper._knnclassifier
    classifier.getParameter = Mock(side_effect=values.get)
    classifier._knn.learn = Mock()
    classifier._knn.prototypeSetCategory = Mock()
    state = {
      "ROWID": 5,
      "anomalyScore": 1.0,
      "anomalyVector": numpy.array([1, 5, 7, 8]),
      "anomalyLabel": ["Label"],
      "setByUser": False
    }
    record = _CLAClassificationRecord(**state)
    # Test with record not already in KNN: the pattern is learned under the
    # record's ROWID and no prototype is overwritten.
    self.helper._addRecordToKNN(record)
    classifier._knn.learn.assert_called_once_with(getAnomalyVector.return_value,
      ANY, rowID=state['ROWID'])
    self.assertTrue(not classifier._knn.prototypeSetCategory.called)
    classifier._knn.learn.reset_mock()
    # Test with record already in KNN (ROWID 5 now in the recency list):
    # only the stored prototype's category is updated, nothing is learned.
    values = {
      'categoryRecencyList': [1, 2, 3, 5]
    }
    classifier.getParameter.side_effect = values.get
    self.helper._addRecordToKNN(record)
    classifier._knn.prototypeSetCategory.assert_called_once_with(
      state['ROWID'], ANY)
    self.assertTrue(not classifier._knn.learn.called)
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  def testDeleteRangeFromKNN(self, getAnomalyVector):
    """_deleteRangeFromKNN(start, end) must remove the stored ROWIDs in the
    half-open range [start, end); when no end is given it must remove every
    ROWID from `start` onward."""
    getAnomalyVector.return_value = "Vector"
    values = {
      'categoryRecencyList': [1, 2, 3]
    }
    classifier = self.helper._knnclassifier
    classifier.getParameter = Mock(side_effect=values.get)
    classifier._knn._numPatterns = len(values['categoryRecencyList'])
    # mockRemoveIds keeps _numPatterns and the recency list consistent.
    classifier._knn.removeIds = Mock(side_effect=self.mockRemoveIds)
    # Bounded range: only the ids in [1, 3) are removed.
    self.helper._deleteRangeFromKNN(start=1, end=3)
    classifier._knn.removeIds.assert_called_once_with([1, 2])
    classifier._knn.removeIds.reset_mock()
    # Open-ended range: everything from `start` onward is removed.
    values = {
      'categoryRecencyList': [1, 2, 3, 5]
    }
    classifier.getParameter.side_effect = values.get
    self.helper._deleteRangeFromKNN(start=1)
    classifier._knn.removeIds.assert_called_once_with([1, 2, 3, 5])
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  def testRecomputeRecordFromKNN(self, getAnomalyVector):
    """_recomputeRecordFromKNN() must return the category of the closest
    stored pattern that (a) precedes the record's ROWID and (b) lies within
    _classificationMaxDist; otherwise it must return None."""
    getAnomalyVector.return_value = "Vector"
    self.helper.trainRecords = 0
    # latestDists/categories are parallel to categoryRecencyList: e.g. the
    # pattern with ROWID 2 has distance 0.2 and category 'B'.
    values = {
      'categoryRecencyList': [1, 2, 3, 5, 6, 7, 8, 9],
      'latestDists': numpy.array([0.7, 0.2, 0.5, 1, 0.3, 0.2, 0.1]),
      'categories': ['A','B','C','D','E','F','G']
    }
    classifier = self.helper._knnclassifier
    classifier.getLatestDistances = Mock(return_value=values['latestDists'])
    classifier.getCategoryList = Mock(return_value=values['categories'])
    classifier.getParameter = Mock(side_effect=values.get)
    classifier.setParameter = Mock()
    classifier.compute = Mock()
    state = {
      "ROWID": 5,
      "anomalyScore": 1.0,
      "anomalyVector": "",
      "anomalyLabel": ["Label"],
      "setByUser": False
    }
    record = _CLAClassificationRecord(**state)
    # Best category before the record exists within the distance threshold:
    # ROWID 2 ('B', dist 0.2) is the closest match under 0.4.
    self.helper._classificationMaxDist = 0.4
    self.helper._autoDetectWaitRecords = 0
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, 'B')
    # No earlier pattern is within the tighter threshold -> None.
    self.helper._classificationMaxDist = 0.1
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, None)
    # No pattern precedes ROWID 0 at all -> None.
    record.ROWID = 0
    self.helper._classificationMaxDist = 0.1
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, None)
  def testConstructClassificationVector(self):
    """_constructClassificationRecord() must build the sparse anomaly vector
    according to classificationVectorType: type 1 uses the TM's learn-active
    cells directly; type 2 concatenates SP-column / previously-predicted-column
    errors; any other type raises TypeError."""
    modelParams = {
      '__numRunCalls': 0
    }
    spVals = {
      'params': {
        'activeOutputCount': 5
      },
      'output': {
        'bottomUpOut': numpy.array([1, 1, 0, 0, 1])
      }
    }
    tpVals = {
      'params': {
        'cellsPerColumn': 2,
        'columnCount': 2
      },
      'output': {
        'lrnActive': numpy.array([1, 0, 0, 1]),
        'topDownOut': numpy.array([1, 0, 0, 0, 1])
      }
    }
    inputs = dict(
      spBottomUpOut=spVals['output']['bottomUpOut'],
      tpTopDownOut=tpVals['output']['topDownOut'],
      tpLrnActiveStateT=tpVals['output']['lrnActive']
    )
    self.helper._activeColumnCount = 5
    # Type 1: TM cell vector == indices of the learn-active cells.
    self.helper.classificationVectorType = 1
    vector = self.helper._constructClassificationRecord(inputs)
    self.assertEqual(vector.anomalyVector,
                     tpVals['output']['lrnActive'].nonzero()[0].tolist())
    # Type 2: SP active columns plus (offset by column count) the columns
    # that were predicted last step but did not become active.
    self.helper.classificationVectorType = 2
    self.helper._prevPredictedColumns = numpy.array(
      [1, 0, 0, 0, 1]).nonzero()[0]
    vector = self.helper._constructClassificationRecord(inputs)
    self.assertEqual(vector.anomalyVector, [0, 1, 4])
    self.helper._prevPredictedColumns = numpy.array(
      [1, 0, 1, 0, 0]).nonzero()[0]
    vector = self.helper._constructClassificationRecord(inputs)
    self.assertEqual(vector.anomalyVector, [0, 1, 4, 7])
    # Any other vector type is rejected.
    self.helper.classificationVectorType = 3
    self.assertRaises(TypeError, self.helper._constructClassificationRecord,
          inputs)
  @patch.object(KNNAnomalyClassifierRegion ,'_classifyState')
  @patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord')
  def testCompute(self, createRecord, updateState):
    """compute() must append each new record to _recordsCache, classify it
    only once the training period has passed, and cap the cache at
    cacheSize entries."""
    state = {
      "ROWID": 0,
      "anomalyScore": 1.0,
      "anomalyVector": numpy.array([1, 0, 0, 0, 1]),
      "anomalyLabel": "Label"
    }
    record = _CLAClassificationRecord(**state)
    createRecord.return_value = record
    inputs = dict()
    outputs= dict()
    # Test add first record: past training (trainRecords=0), so the record
    # is cached AND classified.
    self.helper.cacheSize = 10
    self.helper.trainRecords = 0
    self.helper._recordsCache = []
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 1)
    updateState.assert_called_once_with(self.helper._recordsCache[-1])
    # Test add record before wait records: still training, so records are
    # cached but never classified.
    updateState.reset_mock()
    self.helper.cacheSize = 10
    self.helper.trainRecords = 10
    self.helper._recordsCache = []
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 1)
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 2)
    self.assertTrue(not updateState.called)
    # Test exceeded cache length: the cache never grows past cacheSize.
    updateState.reset_mock()
    self.helper.cacheSize = 1
    self.helper._recordsCache = []
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 1)
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 1)
    self.assertTrue(not updateState.called)
def testCategoryToList(self):
result = self.helper._categoryToLabelList(None)
self.assertEqual(result, [])
self.helper.saved_categories = ['A', 'B', 'C']
result = self.helper._categoryToLabelList(1)
self.assertEqual(result, ['A'])
result = self.helper._categoryToLabelList(4)
self.assertEqual(result, ['C'])
result = self.helper._categoryToLabelList(5)
self.assertEqual(result, ['A','C'])
def testGetAnomalyVector(self):
state = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": [1,4,5],
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
self.helper._anomalyVectorLength = 10
vector = self.helper._getStateAnomalyVector(record)
self.assertEqual(len(vector), self.helper._anomalyVectorLength)
self.assertEqual(vector.nonzero()[0].tolist(), record.anomalyVector)
# Tests for configuration
# ===========================================================================
  def testSetState(self):
    """__setstate__() must migrate legacy pickles: an unversioned state gets
    the default vector type and the current version stamp, version 1 states
    are upgraded to the current version, and unknown versions raise."""
    # No Version set: defaults applied and version stamped.
    state = dict(_classificationDelay=100)
    state['_knnclassifierProps'] = self.params
    self.helper._vectorType = None
    self.helper.__setstate__(state)
    self.assertEqual(self.helper.classificationVectorType, 1)
    self.assertEqual(self.helper._version,
        KNNAnomalyClassifierRegion.__VERSION__)
    # Version 1: upgraded in place to the current version.
    state = dict(_version=1, _classificationDelay=100)
    state['_knnclassifierProps'] = self.params
    self.helper.__setstate__(state)
    self.assertEqual(self.helper._version,
        KNNAnomalyClassifierRegion.__VERSION__)
    # Invalid Version: rejected outright.
    state = dict(_version="invalid")
    state['_knnclassifierProps'] = self.params
    self.assertRaises(Exception, self.helper.__setstate__, state)
# Tests for _HTMClassificationRecord class
# ===========================================================================
def testCLAClassificationRecord(self):
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label"
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, False)
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": True
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, record['setByUser'])
def testCLAClassificationRecordGetState(self):
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": False
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.__getstate__(), record)
def testCLAClassificationRecordSetState(self):
record = {
"ROWID": None,
"anomalyScore": None,
"anomalyVector": None,
"anomalyLabel": None,
"setByUser": None
}
state = _CLAClassificationRecord(**record)
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": False
}
state.__setstate__(record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, record['setByUser'])
def mockRemoveIds(self, ids):
self.helper._knnclassifier._knn._numPatterns -= len(ids)
knnClassifier = self.helper._knnclassifier
for idx in ids:
if idx in self.helper._knnclassifier.getParameter('categoryRecencyList'):
knnClassifier.getParameter('categoryRecencyList').remove(idx)
  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    """Cap'n Proto round-trip: a trained region serialized with
    writeToProto() and restored with readFromProto() must reproduce all
    scalar settings, the predicted-column state, the label results, and the
    full records cache."""
    self.maxDiff = None
    # Build random sparse input records (20 active bits out of 1000 per
    # field) to train on.
    records = []
    for i in xrange(self.helper.trainRecords):
      spBottomUpOut = numpy.zeros(1000)
      tpTopDownOut = numpy.zeros(1000)
      tpLrnActiveStateT = numpy.zeros(1000)
      spBottomUpOut[random.sample(xrange(1000), 20)] = 1
      tpTopDownOut[random.sample(xrange(1000), 20)] = 1
      tpLrnActiveStateT[random.sample(xrange(1000), 20)] = 1
      records.append({
        'spBottomUpOut': spBottomUpOut,
        'tpTopDownOut': tpTopDownOut,
        'tpLrnActiveStateT': tpLrnActiveStateT
      })
    self.helper.setParameter('anomalyThreshold', None, 0.5)
    # Feed the training records, then a few extra post-training steps so the
    # region accumulates non-trivial state before serialization.
    for i in xrange(self.helper.trainRecords):
      self.helper.compute(records[i], None)
    for _ in xrange(10):
      self.helper.compute(random.choice(records), None)
    # Serialize through a real file to exercise the full proto path.
    proto = KNNAnomalyClassifierRegionProto.new_message()
    self.helper.writeToProto(proto)
    with tempfile.TemporaryFile() as f:
      proto.write(f)
      f.seek(0)
      protoDeserialized = KNNAnomalyClassifierRegionProto.read(f)
      knnDeserialized = KNNAnomalyClassifierRegion.readFromProto(
          protoDeserialized)
    # Every serialized field must survive the round trip.
    self.assertEquals(self.helper._maxLabelOutputs,
                      knnDeserialized._maxLabelOutputs)
    self.assertEquals(self.helper._activeColumnCount,
                      knnDeserialized._activeColumnCount)
    self.assertTrue((self.helper._prevPredictedColumns ==
                     knnDeserialized._prevPredictedColumns).all())
    self.assertEquals(self.helper._anomalyVectorLength,
                      knnDeserialized._anomalyVectorLength)
    self.assertAlmostEquals(self.helper._classificationMaxDist,
                            knnDeserialized._classificationMaxDist)
    self.assertEquals(self.helper._iteration, knnDeserialized._iteration)
    self.assertEquals(self.helper.trainRecords, knnDeserialized.trainRecords)
    self.assertEquals(self.helper.anomalyThreshold,
                      knnDeserialized.anomalyThreshold)
    self.assertEquals(self.helper.cacheSize, knnDeserialized.cacheSize)
    self.assertEquals(self.helper.classificationVectorType,
                      knnDeserialized.classificationVectorType)
    self.assertListEqual(self.helper.getLabelResults(),
                         knnDeserialized.getLabelResults())
    # The cached classification records must match field-for-field.
    for i, expected in enumerate(self.helper._recordsCache):
      actual = knnDeserialized._recordsCache[i]
      self.assertEquals(expected.ROWID, actual.ROWID)
      self.assertAlmostEquals(expected.anomalyScore, actual.anomalyScore)
      self.assertListEqual(expected.anomalyVector, actual.anomalyVector)
      self.assertListEqual(expected.anomalyLabel, actual.anomalyLabel)
      self.assertEquals(expected.setByUser, actual.setByUser)
# Script entry point: parse test-runner options, then hand any leftover
# positional arguments through to the unittest framework.
if __name__ == '__main__':
  parser = TestOptionParser()
  options, args = parser.parse_args()
  # Form the command line for the unit test framework
  args = [sys.argv[0]] + args
  unittest.main(argv=args)
| 33,328 | Python | .py | 749 | 38.197597 | 115 | 0.704803 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,088 | unified_py_parameter_test.py | numenta_nupic-legacy/tests/unit/nupic/engine/unified_py_parameter_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test for get/setParameter in python -- these methods are syntactic sugar
that allow you to access parameters without knowing their types,
at a moderate performance penalty.
"""
import unittest2 as unittest
# import for type comparison with Array.
# (Seems we should be able to use nupic.engine.Array directly.)
import nupic.bindings.engine_internal
from nupic.engine import Network
class NetworkUnifiedPyParameterTest(unittest.TestCase):
  """Exercises the unified (untyped) Region.getParameter/setParameter
  wrappers, which infer each parameter's concrete type at runtime."""
  def testScalars(self):
    """Every scalar parameter must round-trip through the unified
    get/setParameter with the expected Python type and value."""
    # (name, nodespec initial value, expected Python type, value to write)
    scalars = [
      ("int32Param", 32, int, 35),
      ("uint32Param", 33, int, 36),
      ("int64Param", 64, long, 74),
      ("uint64Param", 65, long, 75),
      ("real32Param", 32.1, float, 33.1),
      ("real64Param", 64.1, float, 65.1),
      ("stringParam", "nodespec value", str, "new value")]
    network = Network()
    region = network.addRegion("l1", "TestNode", "")
    x = region.getParameter("uint32Param")
    for paramName, initval, paramtype, newval in scalars:
      # The nodespec's initial value must come back with the right type.
      value = region.getParameter(paramName)
      self.assertEqual(type(value), paramtype)
      if initval is None:
        continue
      if type(value) == float:
        # Floats are compared approximately.
        self.assertTrue(abs(value - initval) < 0.00001)
      else:
        self.assertEqual(value, initval)
      # Writing a new value must be reflected on the next read.
      region.setParameter(paramName, newval)
      value = region.getParameter(paramName)
      self.assertEqual(type(value), paramtype)
      if type(value) == float:
        self.assertTrue(abs(value - newval) < 0.00001)
      else:
        self.assertEqual(value, newval)
  def testArrays(self):
    """Array parameters must come back as typed engine Array objects holding
    the nodespec values, and in-place writes must be persisted through
    setParameter."""
    arrays = [
      ("real32ArrayParam",
       [0*32, 1*32, 2*32, 3*32, 4*32, 5*32, 6*32, 7*32],
       "Real32"),
      ("int64ArrayParam",
       [0*64, 1*64, 2*64, 3*64],
       "Int64")
    ]
    network = Network()
    region = network.addRegion("l1", "TestNode", "")
    def checkContents(arr, typeName, expected):
      # The wrapper must hand back a typed engine Array mirroring `expected`.
      self.assertTrue(isinstance(arr, nupic.bindings.engine_internal.Array))
      self.assertEqual(arr.getType(), typeName)
      self.assertEqual(len(arr), len(expected))
      for i in xrange(len(arr)):
        self.assertEqual(arr[i], expected[i])
    for paramName, initval, paramtype in arrays:
      arr = region.getParameter(paramName)
      checkContents(arr, paramtype, initval)
      # Double every element in place, write the array back, and make sure
      # the doubled contents are what we read the next time around.
      for i in xrange(len(arr)):
        arr[i] = arr[i] * 2
      region.setParameter(paramName, arr)
      checkContents(region.getParameter(paramName), paramtype,
                    [v * 2 for v in initval])
# Run the tests when this file is executed directly.
if __name__ == "__main__":
  unittest.main()
| 3,582 | Python | .py | 90 | 34.744444 | 74 | 0.648329 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,089 | syntactic_sugar_test.py | numenta_nupic-legacy/tests/unit/nupic/engine/syntactic_sugar_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import unittest2 as unittest
import nupic.engine as net
class NetworkSugarTest(unittest.TestCase):
def testPhases(self):
n = net.Network()
self.assertEqual(n.minPhase, 0)
self.assertEqual(n.maxPhase, 0)
self.assertEqual(n.minEnabledPhase, 0)
self.assertEqual(n.maxEnabledPhase, 0)
_r1 = n.addRegion('r1', 'TestNode', '')
_r2 = n.addRegion('r2', 'TestNode', '')
self.assertEqual(n.minPhase, 0)
self.assertEqual(n.maxPhase, 1)
self.assertEqual(n.minEnabledPhase, 0)
self.assertEqual(n.maxEnabledPhase, 1)
n.setPhases('r1', (1, 4))
n.setPhases('r2', (2, 3))
self.assertEqual(n.minPhase, 1)
self.assertEqual(n.maxPhase, 4)
self.assertEqual(n.minEnabledPhase, 1)
self.assertEqual(n.maxEnabledPhase, 4)
n.minEnabledPhase = 2
n.maxEnabledPhase = 3
self.assertEqual(n.minPhase, 1)
self.assertEqual(n.maxPhase, 4)
self.assertEqual(n.minEnabledPhase, 2)
self.assertEqual(n.maxEnabledPhase, 3)
def testRegionCollection(self):
n = net.Network()
regions = n.regions
self.assertEqual(len(regions), 0)
r1 = n.addRegion('r1', 'TestNode', '')
r2 = n.addRegion('r2', 'TestNode', '')
self.assertTrue(r1 is not None)
self.assertEqual(len(regions), 2)
# test the 'in' operator
self.assertTrue('r1' in regions)
self.assertTrue('r2' in regions)
self.assertFalse('r3' in regions)
# test [] operator
self.assertEqual(regions['r1'], r1)
self.assertEqual(regions['r2'], r2)
with self.assertRaises(KeyError):
_ = regions['r3']
# for iteration
for i, r in enumerate(regions):
if i == 0:
self.assertEqual(r[0], 'r1')
elif i == 1:
self.assertEqual(r[0], 'r2')
else:
self.fail("Expected i == 0 or i == 1")
# test .keys()
keys = regions.keys()
self.assertEqual(keys, list(['r1', 'r2']))
# test .values()
values = regions.values()
self.assertEqual(len(values), 2)
v1 = values.pop()
v2 = values.pop()
self.assertTrue((v1, v2) == (r1, r2) or (v1, v2) == (r2, r1))
# test .items()
items = regions.items()
self.assertEqual(len(items), 2)
i1 = items.pop()
i2 = items.pop()
self.assertTrue((i1, i2) == (('r1', r1), ('r2', r2)) or
(('r2', r2), ('r1', r1)))
@unittest.skipIf(sys.platform.lower().startswith("win"),
"Not supported on Windows, yet!")
def testRegion(self):
r = net.Network().addRegion('r', 'py.TestNode', '')
print r.spec
self.assertEqual(r.type, 'py.TestNode')
self.assertEqual(r.name, 'r')
self.assertTrue(r.dimensions.isUnspecified())
@unittest.skipIf(sys.platform.lower().startswith("win"),
"Not supported on Windows, yet!")
def testSpec(self):
ns = net.Region.getSpecFromType('py.TestNode')
self.assertEqual(ns.description,
'The node spec of the NuPIC 2 Python TestNode')
n = net.Network()
r = n.addRegion('r', 'py.TestNode', '')
ns2 = r.spec
self.assertEqual(ns.singleNodeOnly, ns2.singleNodeOnly)
self.assertEqual(ns.description, ns2.description)
self.assertEqual(ns.inputs, ns2.inputs)
self.assertEqual(ns.outputs, ns2.outputs)
self.assertEqual(ns.parameters, ns2.parameters)
self.assertEqual(ns.commands, ns2.commands)
def testTimer(self):
t = net.Timer()
self.assertEqual(t.elapsed, 0)
self.assertEqual(t.startCount, 0)
self.assertEqual(str(t), "[Elapsed: 0 Starts: 0]")
t.start()
# Dummy time
_j = 0
for i in xrange(0, 1000):
_j = i
t.stop()
self.assertTrue(t.elapsed > 0)
self.assertEqual(t.startCount, 1)
# Run the tests when this file is executed directly.
if __name__ == "__main__":
  unittest.main()
| 4,744 | Python | .py | 127 | 32.228346 | 72 | 0.64412 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,090 | network_test.py | numenta_nupic-legacy/tests/unit/nupic/engine/network_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
from mock import patch
import unittest2 as unittest
from nupic import engine
from nupic.bindings.regions.TestNode import TestNode
from nupic.regions.sp_region import SPRegion
class NetworkTest(unittest.TestCase):
@unittest.skipIf(sys.platform.lower().startswith("win"),
"Not supported on Windows, yet!")
def testErrorHandling(self):
n = engine.Network()
# Test trying to add non-existent node
with self.assertRaises(Exception) as cm:
n.addRegion('r', 'py.NonExistingNode', '')
self.assertEqual(cm.exception.message, "Matching Python module for NonExistingNode not found.")
orig_import = __import__
def import_mock(name, *args):
if name == "nupic.regions.unimportable_node":
raise SyntaxError("invalid syntax (unimportable_node.py, line 5)")
return orig_import(name, *args)
with patch('__builtin__.__import__', side_effect=import_mock):
# Test failure during import
with self.assertRaises(Exception) as cm:
n.addRegion('r', 'py.UnimportableNode', '')
self.assertEqual(cm.exception.message, "invalid syntax (unimportable_node.py, line 5)")
# Test failure in the __init__() method
with self.assertRaises(Exception) as cm:
n.addRegion('r', 'py.TestNode', '{ failInInit: 1 }')
self.assertEqual(cm.exception.message, "TestNode.__init__() Failing on purpose as requested")
# Test failure inside the compute() method
with self.assertRaises(Exception) as cm:
r = n.addRegion('r', 'py.TestNode', '{ failInCompute: 1 }')
r.dimensions = engine.Dimensions([4, 4])
n.initialize()
n.run(1)
self.assertEqual(str(cm.exception),
'TestNode.compute() Failing on purpose as requested')
# Test failure in the static getSpec
from nupic.bindings.regions.TestNode import TestNode
TestNode._failIngetSpec = True
with self.assertRaises(Exception) as cm:
TestNode.getSpec()
self.assertEqual(str(cm.exception),
'Failing in TestNode.getSpec() as requested')
del TestNode._failIngetSpec
def testGetSpecFromType(self):
ns = engine.Region.getSpecFromType('py.SPRegion')
p = ns.parameters['breakPdb']
self.assertEqual(p.accessMode, 'ReadWrite')
def testOneRegionNetwork(self):
n = engine.Network()
print "Number of regions in new network: %d" % len(n.regions)
self.assertEqual(len(n.regions), 0)
print "Adding level1SP"
level1SP = n.addRegion("level1SP", "TestNode", "")
print "Current dimensions are: %s" % level1SP.dimensions
print "Number of regions in network: %d" % len(n.regions)
self.assertEqual(len(n.regions), 1)
self.assertEqual(len(n.regions), len(n.regions))
print 'Node type: ', level1SP.type
print("Attempting to initialize net when "
"one region has unspecified dimensions")
print "Current dimensions are: %s" % level1SP.dimensions
with self.assertRaises(Exception):
n.initialize()
# Test Dimensions
level1SP.dimensions = engine.Dimensions([4, 4])
print "Set dimensions of level1SP to %s" % str(level1SP.dimensions)
n.initialize()
# Test Array
a = engine.Array('Int32', 10)
self.assertEqual(a.getType(), 'Int32')
self.assertEqual(len(a), 10)
import nupic
self.assertEqual(type(a), nupic.bindings.engine_internal.Int32Array)
for i in range(len(a)):
a[i] = i
for i in range(len(a)):
self.assertEqual(type(a[i]), int)
self.assertEqual(a[i], i)
print i,
print
# --- Test Numpy Array
print 'Testing Numpy Array'
a = engine.Array('Byte', 15)
print len(a)
for i in range(len(a)):
a[i] = ord('A') + i
for i in range(len(a)):
print a[i], ord('A') + i
self.assertEqual(ord(a[i]), ord('A') + i)
print
print 'before asNumpyarray()'
na = a.asNumpyArray()
print 'after asNumpyarray()'
self.assertEqual(na.shape, (15,))
print 'na.shape:', na.shape
na = na.reshape(5, 3)
self.assertEqual(na.shape, (5, 3))
print 'na.shape:', na.shape
for i in range(5):
for j in range(3):
print chr(na[i, j]), ' ',
print
print
# --- Test get/setParameter for Int64 and Real64
print '---'
print 'Testing get/setParameter for Int64/Real64'
val = level1SP.getParameterInt64('int64Param')
rval = level1SP.getParameterReal64('real64Param')
print 'level1SP.int64Param = ', val
print 'level1SP.real64Param = ', rval
val = 20
level1SP.setParameterInt64('int64Param', val)
val = 0
val = level1SP.getParameterInt64('int64Param')
print 'level1SP.int64Param = ', val, ' after setting to 20'
rval = 30.1
level1SP.setParameterReal64('real64Param', rval)
rval = 0.0
rval = level1SP.getParameterReal64('real64Param')
print 'level1SP.real64Param = ', rval, ' after setting to 30.1'
# --- Test array parameter
# Array a will be allocated inside getParameter
print '---'
print 'Testing get/setParameterArray'
a = engine.Array('Int64', 4)
level1SP.getParameterArray("int64ArrayParam", a)
print 'level1SP.int64ArrayParam size = ', len(a)
print 'level1SP.int64ArrayParam = [ ',
for i in range(len(a)):
print a[i],
print ']'
#
# --- test setParameter of an Int64 Array ---
print 'Setting level1SP.int64ArrayParam to [ 1 2 3 4 ]'
a2 = engine.Array('Int64', 4)
for i in range(4):
a2[i] = i + 1
level1SP.setParameterArray('int64ArrayParam', a2)
# get the value of int64ArrayParam after the setParameter call.
# The array a owns its buffer, so we can call releaseBuffer if we
# want, but the buffer should be reused if we just pass it again.
#// a.releaseBuffer();
level1SP.getParameterArray('int64ArrayParam', a)
print 'level1SP.int64ArrayParam size = ', len(a)
print 'level1SP.int64ArrayParam = [ ',
for i in range(len(a)):
print a[i],
print ']'
level1SP.compute()
print "Running for 2 iteraitons"
n.run(2)
# --- Test input/output access
#
# Getting access via zero-copy
with self.assertRaises(Exception):
level1SP.getOutputData('doesnotexist')
output = level1SP.getOutputData('bottomUpOut')
print 'Element count in bottomUpOut is ', len(output)
# set the actual output
output[11] = 7777
output[12] = 54321
# Create a reshaped view of the numpy array
# original output is 32x1 -- 16 nodes, 2 elements per node
# Reshape to 8 rows, 4 columns
numpy_output2 = output.reshape(8, 4)
# Make sure the original output, the numpy array and the reshaped numpy view
# are all in sync and access the same underlying memory.
numpy_output2[1, 0] = 5555
self.assertEqual(output[4], 5555)
output[5] = 3333
self.assertEqual(numpy_output2[1, 1], 3333)
numpy_output2[1, 2] = 4444
# --- Test doc strings
# TODO: commented out because I'm not sure what to do with these
# now that regions have been converted to the Collection class.
# print
# print "Here are some docstrings for properties and methods:"
# for name in ('regionCount', 'getRegionCount', 'getRegionByName'):
# x = getattr(engine.Network, name)
# if isinstance(x, property):
# print 'property Network.{0}: "{1}"'.format(name, x.__doc__)
# else:
# print 'method Network.{0}(): "{1}"'.format(name, x.__doc__)
# Typed methods should return correct type
print "real64Param: %.2f" % level1SP.getParameterReal64("real64Param")
# Uncomment to get performance for getParameter
if 0:
import time
t1 = time.time()
t1 = time.time()
for i in xrange(0, 1000000):
# x = level1SP.getParameterInt64("int64Param") # buffered
x = level1SP.getParameterReal64("real64Param") # unbuffered
t2 = time.time()
print "Time for 1M getParameter calls: %.2f seconds" % (t2 - t1)
def testTwoRegionNetwork(self):
n = engine.Network()
region1 = n.addRegion("region1", "TestNode", "")
region2 = n.addRegion("region2", "TestNode", "")
names = [region[0] for region in n.regions]
self.assertEqual(names, ['region1', 'region2'])
print n.getPhases('region1')
self.assertEqual(n.getPhases('region1'), (0,))
self.assertEqual(n.getPhases('region2'), (1,))
n.link("region1", "region2", "TestFanIn2", "")
print "Initialize should fail..."
with self.assertRaises(Exception):
n.initialize()
print "Setting region1 dims"
r1dims = engine.Dimensions([6, 4])
region1.setDimensions(r1dims)
print "Initialize should now succeed"
n.initialize()
r2dims = region2.dimensions
self.assertEqual(len(r2dims), 2)
self.assertEqual(r2dims[0], 3)
self.assertEqual(r2dims[1], 2)
# Negative test
with self.assertRaises(Exception):
region2.setDimensions(r1dims)
def testDelayedLink(self):
n = engine.Network()
region1 = n.addRegion("region1", "TestNode", "")
region2 = n.addRegion("region2", "TestNode", "")
names = []
propagationDelay = 2
n.link("region1", "region2", "TestFanIn2", "",
propagationDelay=propagationDelay)
r1dims = engine.Dimensions([6, 4])
region1.setDimensions(r1dims)
n.initialize()
outputArrays = []
inputArrays = []
iterations = propagationDelay + 2
for i in xrange(iterations):
n.run(1)
if i < iterations - propagationDelay:
outputArrays.append(list(region1.getOutputData("bottomUpOut")))
if i < propagationDelay:
# Pre-initialized delay elements should be arrays of all 0's
outputArrays.insert(i, [0.0] * len(outputArrays[0]))
inputArrays.append(list(region2.getInputData("bottomUpIn")))
self.assertListEqual(inputArrays, outputArrays)
def testInputsAndOutputs(self):
n = engine.Network()
region1 = n.addRegion("region1", "TestNode", "")
region2 = n.addRegion("region2", "TestNode", "")
region1.setDimensions(engine.Dimensions([6, 4]))
n.link("region1", "region2", "TestFanIn2", "")
n.initialize()
r1_output = region1.getOutputData("bottomUpOut")
region1.compute()
print "Region 1 output after first iteration:"
print "r1_output:", r1_output
region2.prepareInputs()
r2_input = region2.getInputData("bottomUpIn")
print "Region 2 input after first iteration:"
print 'r2_input:', r2_input
def testNodeSpec(self):
n = engine.Network()
r = n.addRegion("region", "TestNode", "")
print r.getSpec()
@unittest.skipIf(sys.platform.lower().startswith("win"),
"Not supported on Windows, yet!")
def testPyNodeGetSetParameter(self):
n = engine.Network()
r = n.addRegion("region", "py.TestNode", "")
print "Setting region1 dims"
r.dimensions = engine.Dimensions([6, 4])
print "Initialize should now succeed"
n.initialize()
result = r.getParameterReal64('real64Param')
self.assertEqual(result, 64.1)
r.setParameterReal64('real64Param', 77.7)
result = r.getParameterReal64('real64Param')
self.assertEqual(result, 77.7)
@unittest.skipIf(sys.platform.lower().startswith("win"),
                 "Not supported on Windows, yet!")
def testPyNodeGetNodeSpec(self):
  # The spec of a py.TestNode region exposes exactly one input
  # ('bottomUpIn') and one output ('bottomUpOut'), each with a description.
  n = engine.Network()
  r = n.addRegion("region", "py.TestNode", "")
  print "Setting region1 dims"
  r.setDimensions(engine.Dimensions([6, 4]))
  print "Initialize should now succeed"
  n.initialize()
  ns = r.spec
  self.assertEqual(len(ns.inputs), 1)
  i = ns.inputs['bottomUpIn']
  self.assertEqual(i.description, 'Primary input for the node')
  self.assertEqual(len(ns.outputs), 1)
  i = ns.outputs['bottomUpOut']
  self.assertEqual(i.description, 'Primary output for the node')
@unittest.skipIf(sys.platform.lower().startswith("win"),
                 "Not supported on Windows, yet!")
def testTwoRegionPyNodeNetwork(self):
  # Two py.TestNode regions joined by a TestFanIn2 link: initialization must
  # fail while region1 has no dimensions; once set to 6x4, region2's
  # dimensions are inferred as 3x2 (each axis halved, per the assertions).
  n = engine.Network()
  region1 = n.addRegion("region1", "py.TestNode", "")
  region2 = n.addRegion("region2", "py.TestNode", "")
  n.link("region1", "region2", "TestFanIn2", "")
  print "Initialize should fail..."
  with self.assertRaises(Exception):
    n.initialize()
  print "Setting region1 dims"
  r1dims = engine.Dimensions([6, 4])
  region1.setDimensions(r1dims)
  print "Initialize should now succeed"
  n.initialize()
  r2dims = region2.dimensions
  self.assertEqual(len(r2dims), 2)
  self.assertEqual(r2dims[0], 3)
  self.assertEqual(r2dims[1], 2)
def testGetRegion(self):
  # getRegionsByType() should return regions whose Python implementation
  # matches the requested class, and an empty list when nothing matches.
  network = engine.Network()
  network.addRegion("region1", "py.TestNode", "")
  matches = network.getRegionsByType(TestNode)
  firstMatch = matches[0]
  self.assertEqual(type(firstMatch.getSelf()), TestNode)
  self.assertEqual(network.getRegionsByType(SPRegion), [])
| 13,834 | Python | .py | 339 | 35.300885 | 99 | 0.672923 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,091 | spatial_pooler_cpp_unit_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/spatial_pooler_cpp_unit_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import numpy as np
from nupic.bindings.math import GetNTAReal
from nupic.bindings.algorithms import SpatialPooler
# Uncomment below line to use python SpatialPooler
# from nupic.algorithms.spatial_pooler import SpatialPooler
# Dtype used for SP input/output buffers (the bindings require uint32).
uintDType = "uint32"
# Floating-point dtype matching the NTA build, as reported by GetNTAReal().
realDType = GetNTAReal()
class SpatialPoolerTest(unittest.TestCase):
  """Unit Tests for C++ SpatialPooler class."""


  def testCalculateOverlap(self):
    # A column's overlap is the number of its connected synapses whose
    # presynaptic input bit is on.  Permanence 1 makes a synapse connected,
    # 0 does not, so each `permanences` row fixes which bits a column sees.
    sp = SpatialPooler(inputDimensions = [10],
                       columnDimensions = [5])
    permanences = [
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
      [0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
      [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
      [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
      [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
    ]
    inputVectors = [
      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
      [0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
      [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    ]
    expectedOverlaps = [
      [0, 0, 0, 0, 0],
      [10, 8, 6, 4, 2],
      [5, 4, 3, 2, 1],
      [5, 3, 1, 0, 0],
      [1, 1, 1, 1, 1]
    ]
    for column, permanence in enumerate(permanences):
      sp.setPermanence(column, np.array(permanence, dtype=realDType))
    for inputVector, expectedOverlap in zip(inputVectors, expectedOverlaps):
      inputVector = np.array(inputVector, dtype=uintDType)
      overlap = set(sp._calculateOverlap(inputVector))
      expected = set(expectedOverlap)
      self.assertSetEqual(overlap, expected,
                          "Input: {0}\tExpected: {1}\tActual: {2}".format(
                              inputVector, expected, overlap))


  def testInhibitColumnsGlobal(self):
    # Global inhibition selects winners by overlap score across the whole
    # region; here the five columns with the highest overlaps (5-9) are the
    # expected winners.
    sp = SpatialPooler(inputDimensions = [10],
                       columnDimensions = [10],
                       globalInhibition = True,
                       numActiveColumnsPerInhArea = 10)
    overlaps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    expectedActive = set([5, 6, 7, 8, 9])
    active = sp._inhibitColumns(np.array(overlaps, dtype=realDType))
    active = set(active)
    self.assertSetEqual(active, expectedActive,
                        "Input: {0}\tExpected: {1}\tActual: {2}".format(
                            overlaps, expectedActive, active))


  def testUpdatePermanencesForColumn(self):
    # _updatePermanencesForColumn must clip permanences into [0, 1] and trim
    # values below synPermTrimThreshold (0.05) down to 0; the connected
    # bitmap and per-column connected counts must be updated to match.
    sp = SpatialPooler(inputDimensions = [5],
                       columnDimensions = [5])
    sp.setSynPermTrimThreshold(0.05)
    permanencesList = [
      [ -0.10, 0.500, 0.400, 0.010, 0.020 ],
      [ 0.300, 0.010, 0.020, 0.120, 0.090 ],
      [ 0.070, 0.050, 1.030, 0.190, 0.060 ],
      [ 0.180, 0.090, 0.110, 0.010, 0.030 ],
      [ 0.200, 0.101, 0.050, -0.09, 1.100 ]]
    expectedPermanencesList = [
      [ 0.000, 0.500, 0.400, 0.000, 0.000],
      # Clip   -      -      Trim   Trim
      [0.300, 0.000, 0.000, 0.120, 0.090],
      # -     Trim   Trim   -      -
      [0.070, 0.050, 1.000, 0.190, 0.060],
      # -     -      Clip   -      -
      [0.180, 0.090, 0.110, 0.000, 0.000],
      # -     -      -      Trim   Trim
      [0.200, 0.101, 0.050, 0.000, 1.000]]
      # -     -      -      Clip   Clip
    expectedConnectedSynapsesList = [
      [0, 1, 1, 0, 0],
      [1, 0, 0, 1, 0],
      [0, 0, 1, 1, 0],
      [1, 0, 1, 0, 0],
      [1, 1, 0, 0, 1]]
    expectedConnectedCounts = [2, 2, 2, 2, 3]
    for i in xrange(5):
      permanences = np.array(permanencesList[i], dtype=realDType)
      expectedPermanences = np.array(expectedPermanencesList[i],
                                     dtype=realDType)
      expectedConnectedSynapses = expectedConnectedSynapsesList[i]
      sp._updatePermanencesForColumn(permanences, i, False)
      updatedPermanences = np.zeros(5, dtype=realDType)
      connectedSynapses = np.zeros(5, dtype=uintDType)
      connectedCounts = np.zeros(5, dtype=uintDType)
      sp.getPermanence(i, updatedPermanences)
      sp.getConnectedSynapses(i, connectedSynapses)
      sp.getConnectedCounts(connectedCounts)
      np.testing.assert_almost_equal(updatedPermanences, expectedPermanences)
      self.assertEqual(list(connectedSynapses), expectedConnectedSynapses)
      self.assertEqual(connectedCounts[i], expectedConnectedCounts[i])


  def testUpdateDutyCycles(self):
    # The expected values are consistent with a moving average over
    # min(iterationNum, updatePeriod) steps: at iteration 2 a zero-overlap
    # column drops from 1 to 0.5; with period 1000 it only drops to 0.999.
    sp = SpatialPooler(inputDimensions = [5],
                       columnDimensions = [5])
    initOverlapArr1 = np.array([1, 1, 1, 1, 1], dtype=realDType)
    sp.setOverlapDutyCycles(initOverlapArr1);
    overlaps = np.array([1, 5, 7, 0, 0], dtype=uintDType)
    active = np.array([0, 0, 0, 0, 0], dtype=uintDType)
    sp.setIterationNum(2)
    sp._updateDutyCycles(overlaps, active);
    resultOverlapArr1 = np.zeros(5, dtype=realDType)
    sp.getOverlapDutyCycles(resultOverlapArr1)
    trueOverlapArr1 = np.array([1, 1, 1, 0.5, 0.5], dtype=realDType)
    self.assertEqual(list(resultOverlapArr1), list(trueOverlapArr1))
    # Repeat with a long update period: the per-step decay is much smaller.
    sp.setOverlapDutyCycles(initOverlapArr1);
    sp.setIterationNum(2000);
    sp.setUpdatePeriod(1000);
    sp._updateDutyCycles(overlaps, active);
    resultOverlapArr2 = np.zeros(5, dtype=realDType)
    sp.getOverlapDutyCycles(resultOverlapArr2);
    trueOverlapArr2 = np.array([1, 1, 1, 0.999, 0.999], dtype=realDType)
    self.assertEqual(list(resultOverlapArr2), list(trueOverlapArr2))


  def testComputeParametersValidation(self):
    # compute() accepts 1-D uint32 input/output arrays and must raise
    # RuntimeError for wrong dtype or wrong dimensionality on either side.
    sp = SpatialPooler(inputDimensions=[5], columnDimensions=[5])
    inputGood = np.ones(5, dtype=uintDType)
    outGood = np.zeros(5, dtype=uintDType)
    inputBad = np.ones(5, dtype=realDType)
    inputBad2D = np.ones((5, 5), dtype=realDType)
    outBad = np.zeros(5, dtype=realDType)
    outBad2D = np.zeros((5, 5), dtype=realDType)
    # Validate good parameters
    sp.compute(inputGood, False, outGood)
    # Validate bad parameters
    with self.assertRaises(RuntimeError):
      sp.compute(inputBad, False, outBad)
    # Validate bad input
    with self.assertRaises(RuntimeError):
      sp.compute(inputBad, False, outGood)
    # Validate bad 2d input
    with self.assertRaises(RuntimeError):
      sp.compute(inputBad2D, False, outGood)
    # Validate bad output
    with self.assertRaises(RuntimeError):
      sp.compute(inputGood, False, outBad)
    # Validate bad 2d output
    with self.assertRaises(RuntimeError):
      sp.compute(inputGood, False, outBad2D)
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 7,313 | Python | .py | 166 | 37.421687 | 77 | 0.625951 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,092 | anomaly_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/anomaly_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for anomaly-related algorithms."""
import unittest
from numpy import array
import pickle
from nupic.algorithms import anomaly
from nupic.algorithms.anomaly import Anomaly
class AnomalyTest(unittest.TestCase):
  """Tests for anomaly score functions and classes."""


  def testComputeRawAnomalyScoreNoActiveOrPredicted(self):
    # No active and no predicted columns -> nothing surprising, score 0.
    score = anomaly.computeRawAnomalyScore(array([]), array([]))
    self.assertAlmostEqual(score, 0.0)


  def testComputeRawAnomalyScoreNoActive(self):
    # Predictions with no active columns still yield a 0 score.
    score = anomaly.computeRawAnomalyScore(array([]), array([3, 5]))
    self.assertAlmostEqual(score, 0.0)


  def testComputeRawAnomalyScorePerfectMatch(self):
    # Every active column was predicted -> score 0.
    score = anomaly.computeRawAnomalyScore(array([3, 5, 7]), array([3, 5, 7]))
    self.assertAlmostEqual(score, 0.0)


  def testComputeRawAnomalyScoreNoMatch(self):
    # No active column was predicted -> score 1.
    score = anomaly.computeRawAnomalyScore(array([2, 4, 6]), array([3, 5, 7]))
    self.assertAlmostEqual(score, 1.0)


  def testComputeRawAnomalyScorePartialMatch(self):
    # One of three active columns (3) was predicted -> score 2/3.
    score = anomaly.computeRawAnomalyScore(array([2, 3, 6]), array([3, 5, 7]))
    self.assertAlmostEqual(score, 2.0 / 3.0)


  # The following cases mirror the raw-score cases above, but go through the
  # Anomaly class's compute() in its default mode.

  def testComputeAnomalyScoreNoActiveOrPredicted(self):
    anomalyComputer = anomaly.Anomaly()
    score = anomalyComputer.compute(array([]), array([]))
    self.assertAlmostEqual(score, 0.0)


  def testComputeAnomalyScoreNoActive(self):
    anomalyComputer = anomaly.Anomaly()
    score = anomalyComputer.compute(array([]), array([3, 5]))
    self.assertAlmostEqual(score, 0.0)


  def testComputeAnomalyScorePerfectMatch(self):
    anomalyComputer = anomaly.Anomaly()
    score = anomalyComputer.compute(array([3, 5, 7]), array([3, 5, 7]))
    self.assertAlmostEqual(score, 0.0)


  def testComputeAnomalyScoreNoMatch(self):
    anomalyComputer = anomaly.Anomaly()
    score = anomalyComputer.compute(array([2, 4, 6]), array([3, 5, 7]))
    self.assertAlmostEqual(score, 1.0)


  def testComputeAnomalyScorePartialMatch(self):
    anomalyComputer = anomaly.Anomaly()
    score = anomalyComputer.compute(array([2, 3, 6]), array([3, 5, 7]))
    self.assertAlmostEqual(score, 2.0 / 3.0)


  def testAnomalyCumulative(self):
    """Test cumulative anomaly scores."""
    # With slidingWindowSize=3 the reported score averages the last three raw
    # scores, so a burst of anomalies ramps up and then decays step by step.
    anomalyComputer = anomaly.Anomaly(slidingWindowSize=3)
    predicted = (array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]),
                 array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]),
                 array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]))
    actual = (array([1, 2, 6]), array([1, 2, 6]), array([1, 4, 6]),
              array([10, 11, 6]), array([10, 11, 12]), array([10, 11, 12]),
              array([10, 11, 12]), array([1, 2, 6]), array([1, 2, 6]))
    anomalyExpected = (0.0, 0.0, 1.0/9.0, 3.0/9.0, 2.0/3.0, 8.0/9.0, 1.0,
                       2.0/3.0, 1.0/3.0)
    for act, pred, expected in zip(actual, predicted, anomalyExpected):
      score = anomalyComputer.compute(act, pred)
      self.assertAlmostEqual(
          score, expected, places=5,
          msg="Anomaly score of %f doesn't match expected of %f" % (
              score, expected))


  def testComputeAnomalySelectModePure(self):
    # MODE_PURE must match the raw score exactly.
    anomalyComputer = anomaly.Anomaly(mode=anomaly.Anomaly.MODE_PURE)
    score = anomalyComputer.compute(array([2, 3, 6]), array([3, 5, 7]))
    self.assertAlmostEqual(score, 2.0 / 3.0)


  def testSerialization(self):
    """serialization using pickle"""
    # instances to test
    aDef = Anomaly()
    aLike = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
    aWeig = Anomaly(mode=Anomaly.MODE_WEIGHTED)
    # test anomaly with all whistles (MovingAverage, Likelihood, ...)
    aAll = Anomaly(mode=Anomaly.MODE_LIKELIHOOD, slidingWindowSize=5)
    inst = [aDef, aLike, aWeig, aAll]
    for a in inst:
      stored = pickle.dumps(a)
      restored = pickle.loads(stored)
      self.assertEqual(a, restored)


  def testEquals(self):
    # Equality must consider mode, slidingWindowSize, and
    # binaryAnomalyThreshold; a difference in any one breaks equality.
    an = Anomaly()
    anP = Anomaly()
    self.assertEqual(an, anP, "default constructors equal")
    anN = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
    self.assertNotEqual(an, anN)
    an = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
    self.assertEqual(an, anN)
    an = Anomaly(slidingWindowSize=5, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.9)
    anP = Anomaly(slidingWindowSize=5, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.9)
    anN = Anomaly(slidingWindowSize=4, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.9)
    self.assertEqual(an, anP)
    self.assertNotEqual(an, anN)
    anN = Anomaly(slidingWindowSize=5, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.5)
    self.assertNotEqual(an, anN)
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 5,622 | Python | .py | 114 | 44.333333 | 94 | 0.687775 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,093 | connections_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/connections_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import tempfile
import unittest
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto import ConnectionsProto_capnp
from nupic.algorithms.connections import Connections
class ConnectionsTest(unittest.TestCase):
  # Unit tests for the Connections class: segment/synapse bookkeeping used
  # by the temporal memory (creation, destruction, slot reuse, activity
  # computation, and capnp serialization).

  def testCreateSegment(self):
    # Segments created on the same cell are both retained and returned in
    # creation order by segmentsForCell().
    connections = Connections(1024)
    segment1 = connections.createSegment(10)
    self.assertEqual(segment1.cell, 10)
    segment2 = connections.createSegment(10)
    self.assertEqual(segment2.cell, 10)
    self.assertEqual([segment1, segment2],
                     list(connections.segmentsForCell(10)))


  def testDestroySegment(self):
    """ Creates a segment, destroys it, and makes sure it got destroyed along
    with all of its synapses.
    """
    connections = Connections(1024)
    connections.createSegment(10)
    segment2 = connections.createSegment(20)
    connections.createSegment(30)
    connections.createSegment(40)
    connections.createSynapse(segment2, 80, 0.85)
    connections.createSynapse(segment2, 81, 0.85)
    connections.createSynapse(segment2, 82, 0.15)
    self.assertEqual(4, connections.numSegments())
    self.assertEqual(3, connections.numSynapses())
    connections.destroySegment(segment2)
    self.assertEqual(3, connections.numSegments())
    self.assertEqual(0, connections.numSynapses())
    # The destroyed segment must register no activity for its old inputs.
    (numActiveConnected,
     numActivePotential) = connections.computeActivity([80, 81, 82], 0.5)
    self.assertEqual(0, numActiveConnected[segment2.flatIdx])
    self.assertEqual(0, numActivePotential[segment2.flatIdx])


  def testDestroySynapse(self):
    """ Creates a segment, creates a number of synapses on it, destroys a
    synapse, and makes sure it got destroyed.
    """
    connections = Connections(1024)
    segment = connections.createSegment(20)
    synapse1 = connections.createSynapse(segment, 80, .85)
    synapse2 = connections.createSynapse(segment, 81, .85)
    synapse3 = connections.createSynapse(segment, 82, .15)
    self.assertEqual(3, connections.numSynapses())
    connections.destroySynapse(synapse2)
    self.assertEqual(2, connections.numSynapses())
    self.assertEqual(set([synapse1, synapse3]),
                     connections.synapsesForSegment(segment))
    # synapse1 is connected (.85 >= .5); synapse3 is potential only (.15).
    (numActiveConnected,
     numActivePotential) = connections.computeActivity([80, 81, 82], .5)
    self.assertEqual(1, numActiveConnected[segment.flatIdx])
    self.assertEqual(2, numActivePotential[segment.flatIdx])


  def testPathsNotInvalidatedByOtherDestroys(self):
    """ Creates segments and synapses, then destroys segments and synapses on
    either side of them and verifies that existing Segment and Synapse
    instances still point to the same segment / synapse as before.
    """
    connections = Connections(1024)
    segment1 = connections.createSegment(11)
    connections.createSegment(12)
    segment3 = connections.createSegment(13)
    connections.createSegment(14)
    segment5 = connections.createSegment(15)
    synapse1 = connections.createSynapse(segment3, 201, .85)
    synapse2 = connections.createSynapse(segment3, 202, .85)
    synapse3 = connections.createSynapse(segment3, 203, .85)
    synapse4 = connections.createSynapse(segment3, 204, .85)
    synapse5 = connections.createSynapse(segment3, 205, .85)
    self.assertEqual(203, synapse3.presynapticCell)
    # Destroy synapses on either side of synapse3 -- it must be unaffected.
    connections.destroySynapse(synapse1)
    self.assertEqual(203, synapse3.presynapticCell)
    connections.destroySynapse(synapse5)
    self.assertEqual(203, synapse3.presynapticCell)
    # Likewise destroy segments on either side of segment3.
    connections.destroySegment(segment1)
    self.assertEqual(set([synapse2, synapse3, synapse4]),
                     connections.synapsesForSegment(segment3))
    connections.destroySegment(segment5)
    self.assertEqual(set([synapse2, synapse3, synapse4]),
                     connections.synapsesForSegment(segment3))
    self.assertEqual(203, synapse3.presynapticCell)


  def testDestroySegmentWithDestroyedSynapses(self):
    """ Destroy a segment that has a destroyed synapse and a non-destroyed
    synapse. Make sure nothing gets double-destroyed.
    """
    connections = Connections(1024)
    segment1 = connections.createSegment(11)
    segment2 = connections.createSegment(12)
    connections.createSynapse(segment1, 101, .85)
    synapse2a = connections.createSynapse(segment2, 201, .85)
    connections.createSynapse(segment2, 202, .85)
    self.assertEqual(3, connections.numSynapses())
    connections.destroySynapse(synapse2a)
    self.assertEqual(2, connections.numSegments())
    self.assertEqual(2, connections.numSynapses())
    connections.destroySegment(segment2)
    self.assertEqual(1, connections.numSegments())
    self.assertEqual(1, connections.numSynapses())


  def testReuseSegmentWithDestroyedSynapses(self):
    """ Destroy a segment that has a destroyed synapse and a non-destroyed
    synapse. Create a new segment in the same place. Make sure its synapse
    count is correct.
    """
    connections = Connections(1024)
    segment = connections.createSegment(11)
    synapse1 = connections.createSynapse(segment, 201, .85)
    connections.createSynapse(segment, 202, .85)
    connections.destroySynapse(synapse1)
    self.assertEqual(1, connections.numSynapses(segment))
    connections.destroySegment(segment)
    # The new segment may reuse the destroyed slot; it must start empty.
    reincarnated = connections.createSegment(11)
    self.assertEqual(0, connections.numSynapses(reincarnated))
    self.assertEqual(0, len(connections.synapsesForSegment(reincarnated)))


  def testUpdateSynapsePermanence(self):
    """ Creates a synapse and updates its permanence, and makes sure that its
    data was correctly updated.
    """
    connections = Connections(1024)
    segment = connections.createSegment(10)
    synapse = connections.createSynapse(segment, 50, .34)
    connections.updateSynapsePermanence(synapse, .21)
    synapseData = connections.dataForSynapse(synapse)
    self.assertAlmostEqual(synapseData.permanence, .21)


  def testComputeActivity(self):
    """ Creates a sample set of connections, and makes sure that computing the
    activity for a collection of cells with no activity returns the right
    activity data.
    """
    connections = Connections(1024)
    # Cell with 1 segment.
    # Segment with:
    # - 1 connected synapse: active
    # - 2 matching synapses
    segment1a = connections.createSegment(10)
    connections.createSynapse(segment1a, 150, .85)
    connections.createSynapse(segment1a, 151, .15)
    # Cell with 1 segment.
    # Segment with:
    # - 2 connected synapse: 2 active
    # - 3 matching synapses: 3 active
    segment2a = connections.createSegment(20)
    connections.createSynapse(segment2a, 80, .85)
    connections.createSynapse(segment2a, 81, .85)
    synapse = connections.createSynapse(segment2a, 82, .85)
    connections.updateSynapsePermanence(synapse, .15)
    inputVec = [50, 52, 53, 80, 81, 82, 150, 151]
    (numActiveConnected,
     numActivePotential) = connections.computeActivity(inputVec, .5)
    self.assertEqual(1, numActiveConnected[segment1a.flatIdx])
    self.assertEqual(2, numActivePotential[segment1a.flatIdx])
    self.assertEqual(2, numActiveConnected[segment2a.flatIdx])
    self.assertEqual(3, numActivePotential[segment2a.flatIdx])


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    # Round-trip a Connections instance (including a destroyed segment)
    # through its capnp proto and a temp file; the deserialized copy must
    # compare equal to the original.
    c1 = Connections(1024)
    # Add data before serializing
    s1 = c1.createSegment(0)
    c1.createSynapse(s1, 254, 0.1173)
    s2 = c1.createSegment(100)
    c1.createSynapse(s2, 20, 0.3)
    c1.createSynapse(s1, 40, 0.3)
    s3 = c1.createSegment(0)
    c1.createSynapse(s3, 0, 0.5)
    c1.createSynapse(s3, 1, 0.5)
    s4 = c1.createSegment(10)
    c1.createSynapse(s4, 0, 0.5)
    c1.createSynapse(s4, 1, 0.5)
    c1.destroySegment(s4)
    proto1 = ConnectionsProto_capnp.ConnectionsProto.new_message()
    c1.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = ConnectionsProto_capnp.ConnectionsProto.read(f)
    # Load the deserialized proto
    c2 = Connections.read(proto2)
    # Check that the two connections objects are functionally equal
    self.assertEqual(c1, c2)
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
| 9,330 | Python | .py | 206 | 40.058252 | 78 | 0.735999 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,094 | spatial_pooler_compute_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/spatial_pooler_compute_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import time
import unittest2 as unittest
from nupic.bindings.math import (count_gte,
GetNTAReal,
SM_01_32_32 as SparseBinaryMatrix,
SM32 as SparseMatrix)
from nupic.algorithms.spatial_pooler import SpatialPooler
from nupic.support.unittesthelpers.algorithm_test_helpers \
import getNumpyRandomGenerator, convertSP, CreateSP
uintType = "uint32"
class SpatialPoolerComputeTest(unittest.TestCase):
  """
  End to end tests of the compute function for the SpatialPooler class with no
  mocking anywhere.
  """

  def basicComputeLoop(self, imp, params, inputSize, columnDimensions,
                       seed = None):
    """
    Feed in some vectors and retrieve outputs. Ensure the right number of
    columns win, that we always get binary outputs, and that nothing crashes.

    imp: 'py' or 'cpp' -- which SpatialPooler implementation CreateSP builds.
    params: constructor kwargs for the SP.
    seed: optional seed for the numpy RNG that generates the input vectors.
    """
    sp = CreateSP(imp,params)
    # Create a set of input vectors as well as various numpy vectors we will
    # need to retrieve data from the SP
    numRecords = 100
    randomState = getNumpyRandomGenerator(seed)
    # Sparse random binary inputs (~20% of bits on).
    inputMatrix = (
      randomState.rand(numRecords,inputSize) > 0.8).astype(uintType)
    y = numpy.zeros(columnDimensions, dtype = uintType)
    # NOTE(review): dutyCycles is never used below.
    dutyCycles = numpy.zeros(columnDimensions, dtype = uintType)
    # With learning on we should get the requested number of winners
    for v in inputMatrix:
      y.fill(0)
      sp.compute(v, True, y)
      self.assertEqual(sp.getNumActiveColumnsPerInhArea(),y.sum())
      self.assertEqual(0,y.min())
      self.assertEqual(1,y.max())
    # With learning off and some prior training we should get the requested
    # number of winners
    for v in inputMatrix:
      y.fill(0)
      sp.compute(v, False, y)
      self.assertEqual(sp.getNumActiveColumnsPerInhArea(),y.sum())
      self.assertEqual(0,y.min())
      self.assertEqual(1,y.max())


  def testBasicCompute1(self):
    """
    Run basicComputeLoop with mostly default parameters
    """
    # Size of each input vector
    inputSize = 30
    # Size of each output SDR vector
    columnDimensions = 50
    params = {
      "inputDimensions": [inputSize],
      "columnDimensions": [columnDimensions],
      "potentialRadius": inputSize,
      'globalInhibition': True,
      "seed": int((time.time()%10000)*10),
    }
    # Log the time-derived seed so failures can be reproduced.
    print "testBasicCompute1, SP seed set to:",params['seed']
    self.basicComputeLoop('py', params, inputSize, columnDimensions)
    self.basicComputeLoop('cpp', params, inputSize, columnDimensions)


  def testBasicCompute2(self):
    """
    Run basicComputeLoop with learning turned off.
    """
    # Size of each input vector
    inputSize = 100
    # Size of each output SDR vector
    columnDimensions = 100
    # Zero increments/decrements effectively disable permanence learning.
    params = {
      "inputDimensions": [inputSize],
      "columnDimensions": [columnDimensions],
      "potentialRadius": inputSize,
      'globalInhibition': True,
      "synPermActiveInc": 0.0,
      "synPermInactiveDec": 0.0,
      "seed": int((time.time()%10000)*10),
    }
    print "testBasicCompute2, SP seed set to:",params['seed']
    self.basicComputeLoop('py', params, inputSize, columnDimensions)
    self.basicComputeLoop('cpp', params, inputSize, columnDimensions)
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 4,279 | Python | .py | 106 | 34.962264 | 78 | 0.681041 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,095 | temporal_memory_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/temporal_memory_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import tempfile
import unittest
from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto import TemporalMemoryProto_capnp
class TemporalMemoryTest(unittest.TestCase):
def testInitInvalidParams(self):
  """The constructor must raise ValueError for empty columnDimensions and
  for non-positive cellsPerColumn values."""
  badParams = [
      {"columnDimensions": [], "cellsPerColumn": 32},       # no columns
      {"columnDimensions": [2048], "cellsPerColumn": 0},    # zero cells
      {"columnDimensions": [2048], "cellsPerColumn": -10},  # negative cells
  ]
  for kwargs in badParams:
    self.assertRaises(ValueError, TemporalMemory, **kwargs)
def testActivateCorrectlyPredictiveCells(self):
  # Cell 4 (the first cell of column 1, with cellsPerColumn=4) gets a
  # segment with 4 connected synapses onto column 0's cells; with
  # activationThreshold=3, firing column 0 makes cell 4 predictive, and
  # firing column 1 next activates only that predicted cell.
  tm = TemporalMemory(
    columnDimensions=[32],
    cellsPerColumn=4,
    activationThreshold=3,
    initialPermanence=.21,
    connectedPermanence=.5,
    minThreshold=2,
    maxNewSynapseCount=3,
    permanenceIncrement=.10,
    permanenceDecrement=.10,
    predictedSegmentDecrement=0.0,
    seed=42)
  previousActiveColumns = [0]
  activeColumns = [1]
  previousActiveCells = [0,1,2,3]
  expectedActiveCells = [4]
  activeSegment = tm.createSegment(expectedActiveCells[0])
  tm.connections.createSynapse(activeSegment, previousActiveCells[0], .5)
  tm.connections.createSynapse(activeSegment, previousActiveCells[1], .5)
  tm.connections.createSynapse(activeSegment, previousActiveCells[2], .5)
  tm.connections.createSynapse(activeSegment, previousActiveCells[3], .5)
  tm.compute(previousActiveColumns, True)
  self.assertEqual(expectedActiveCells, tm.getPredictiveCells())
  tm.compute(activeColumns, True)
  self.assertEqual(expectedActiveCells, tm.getActiveCells())
def testBurstUnpredictedColumns(self):
  """An active column with no predicted cells bursts: every one of its
  cells becomes active."""
  tm = TemporalMemory(
    columnDimensions=[32],
    cellsPerColumn=4,
    activationThreshold=3,
    initialPermanence=.21,
    connectedPermanence=.5,
    minThreshold=2,
    maxNewSynapseCount=3,
    permanenceIncrement=.10,
    permanenceDecrement=.10,
    predictedSegmentDecrement=0.0,
    seed=42)
  # Column 0 holds cells 0-3; with no segments anywhere, all should fire.
  tm.compute([0], True)
  self.assertEqual([0, 1, 2, 3], tm.getActiveCells())
def testZeroActiveColumns(self):
  # After a normal step that leaves active, winner, and predictive cells, a
  # step with an empty active-column list must clear all of that state.
  tm = TemporalMemory(
    columnDimensions=[32],
    cellsPerColumn=4,
    activationThreshold=3,
    initialPermanence=.21,
    connectedPermanence=.5,
    minThreshold=2,
    maxNewSynapseCount=3,
    permanenceIncrement=.10,
    permanenceDecrement=.10,
    predictedSegmentDecrement=0.0,
    seed=42)
  previousActiveColumns = [0]
  previousActiveCells = [0, 1, 2, 3]
  expectedActiveCells = [4]
  segment = tm.createSegment(expectedActiveCells[0])
  tm.connections.createSynapse(segment, previousActiveCells[0], .5)
  tm.connections.createSynapse(segment, previousActiveCells[1], .5)
  tm.connections.createSynapse(segment, previousActiveCells[2], .5)
  tm.connections.createSynapse(segment, previousActiveCells[3], .5)
  # First step populates state.
  tm.compute(previousActiveColumns, True)
  self.assertFalse(len(tm.getActiveCells()) == 0)
  self.assertFalse(len(tm.getWinnerCells()) == 0)
  self.assertFalse(len(tm.getPredictiveCells()) == 0)
  # Second step with no active columns must empty it all.
  zeroColumns = []
  tm.compute(zeroColumns, True)
  self.assertTrue(len(tm.getActiveCells()) == 0)
  self.assertTrue(len(tm.getWinnerCells()) == 0)
  self.assertTrue(len(tm.getPredictiveCells()) == 0)
def testPredictedActiveCellsAreAlwaysWinners(self):
  # Cells 4 and 6 (both in column 1) each get an active segment driven by
  # column 0's cells, so when column 1 fires after column 0 both correctly
  # predicted cells are the column's winner cells -- even with learning off.
  tm = TemporalMemory(
    columnDimensions=[32],
    cellsPerColumn=4,
    activationThreshold=3,
    initialPermanence=.21,
    connectedPermanence=.5,
    minThreshold=2,
    maxNewSynapseCount=3,
    permanenceIncrement=.10,
    permanenceDecrement=.10,
    predictedSegmentDecrement=0.0,
    seed=42)
  previousActiveColumns = [0]
  activeColumns = [1]
  previousActiveCells = [0, 1, 2, 3]
  expectedWinnerCells = [4, 6]
  activeSegment1 = tm.createSegment(expectedWinnerCells[0])
  tm.connections.createSynapse(activeSegment1, previousActiveCells[0], .5)
  tm.connections.createSynapse(activeSegment1, previousActiveCells[1], .5)
  tm.connections.createSynapse(activeSegment1, previousActiveCells[2], .5)
  activeSegment2 = tm.createSegment(expectedWinnerCells[1])
  tm.connections.createSynapse(activeSegment2, previousActiveCells[0], .5)
  tm.connections.createSynapse(activeSegment2, previousActiveCells[1], .5)
  tm.connections.createSynapse(activeSegment2, previousActiveCells[2], .5)
  # learn=False: winner selection must not depend on learning.
  tm.compute(previousActiveColumns, False)
  tm.compute(activeColumns, False)
  self.assertEqual(expectedWinnerCells, tm.getWinnerCells())
  def testReinforceCorrectlyActiveSegments(self):
    """Synapses on a correctly predicting segment are reinforced: synapses to
    previously active cells gain permanenceIncrement, synapses to inactive
    cells lose permanenceDecrement.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.2,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.08,
      predictedSegmentDecrement=0.02,
      seed=42)
    prevActiveColumns = [0]
    prevActiveCells = [0,1,2,3]
    activeColumns = [1]
    activeCell = 5
    activeSegment = tm.createSegment(activeCell)
    as1 = tm.connections.createSynapse(activeSegment, prevActiveCells[0], .5)
    as2 = tm.connections.createSynapse(activeSegment, prevActiveCells[1], .5)
    as3 = tm.connections.createSynapse(activeSegment, prevActiveCells[2], .5)
    is1 = tm.connections.createSynapse(activeSegment, 81, .5) #inactive synapse
    tm.compute(prevActiveColumns, True)
    tm.compute(activeColumns, True)
    # Active synapses: .5 + .10 = .6.  Inactive synapse: .5 - .08 = .42.
    self.assertAlmostEqual(.6, tm.connections.dataForSynapse(as1).permanence)
    self.assertAlmostEqual(.6, tm.connections.dataForSynapse(as2).permanence)
    self.assertAlmostEqual(.6, tm.connections.dataForSynapse(as3).permanence)
    self.assertAlmostEqual(.42, tm.connections.dataForSynapse(is1).permanence)
  def testReinforceSelectedMatchingSegmentInBurstingColumn(self):
    """When a column bursts, the best matching segment is selected for
    learning: its synapses to previously active cells are reinforced and its
    synapse to an inactive cell is decremented.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=3,
      permanenceIncrement=.10,
      permanenceDecrement=.08,
      predictedSegmentDecrement=0.0,
      seed=42)
    previousActiveColumns = [0]
    previousActiveCells = [0,1,2,3]
    activeColumns = [1]
    burstingCells = [4,5,6,7]
    # Best match: three potential synapses to previously active cells.
    selectedMatchingSegment = tm.createSegment(burstingCells[0])
    as1 = tm.connections.createSynapse(selectedMatchingSegment,
                                       previousActiveCells[0], .3)
    as2 = tm.connections.createSynapse(selectedMatchingSegment,
                                       previousActiveCells[1], .3)
    as3 = tm.connections.createSynapse(selectedMatchingSegment,
                                       previousActiveCells[2], .3)
    is1 = tm.connections.createSynapse(selectedMatchingSegment, 81, .3)
    # Weaker match: only two synapses to previously active cells.
    otherMatchingSegment = tm.createSegment(burstingCells[1])
    tm.connections.createSynapse(otherMatchingSegment,
                                 previousActiveCells[0], .3)
    tm.connections.createSynapse(otherMatchingSegment,
                                 previousActiveCells[1], .3)
    tm.connections.createSynapse(otherMatchingSegment, 81, .3)
    tm.compute(previousActiveColumns, True)
    tm.compute(activeColumns, True)
    # Active synapses: .3 + .10 = .4.  Inactive synapse: .3 - .08 = .22.
    self.assertAlmostEqual(.4, tm.connections.dataForSynapse(as1).permanence)
    self.assertAlmostEqual(.4, tm.connections.dataForSynapse(as2).permanence)
    self.assertAlmostEqual(.4, tm.connections.dataForSynapse(as3).permanence)
    self.assertAlmostEqual(.22, tm.connections.dataForSynapse(is1).permanence)
  def testNoChangeToNonselectedMatchingSegmentsInBurstingColumn(self):
    """When a column bursts, only the best matching segment learns; the other
    matching segments' permanences are left untouched.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=3,
      permanenceIncrement=.10,
      permanenceDecrement=.08,
      predictedSegmentDecrement=0.0,
      seed=42)
    previousActiveColumns = [0]
    previousActiveCells = [0,1,2,3]
    activeColumns = [1]
    burstingCells = [4,5,6,7]
    # Best match: three synapses to previously active cells.
    selectedMatchingSegment = tm.createSegment(burstingCells[0])
    tm.connections.createSynapse(selectedMatchingSegment,
                                 previousActiveCells[0], .3)
    tm.connections.createSynapse(selectedMatchingSegment,
                                 previousActiveCells[1], .3)
    tm.connections.createSynapse(selectedMatchingSegment,
                                 previousActiveCells[2], .3)
    tm.connections.createSynapse(selectedMatchingSegment, 81, .3)
    # Weaker match: only two synapses to previously active cells.
    otherMatchingSegment = tm.createSegment(burstingCells[1])
    as1 = tm.connections.createSynapse(otherMatchingSegment,
                                       previousActiveCells[0], .3)
    as2 = tm.connections.createSynapse(otherMatchingSegment,
                                       previousActiveCells[1], .3)
    is1 = tm.connections.createSynapse(otherMatchingSegment, 81, .3)
    tm.compute(previousActiveColumns, True)
    tm.compute(activeColumns, True)
    # The non-selected segment's synapses keep their original .3 permanence.
    self.assertAlmostEqual(.3, tm.connections.dataForSynapse(as1).permanence)
    self.assertAlmostEqual(.3, tm.connections.dataForSynapse(as2).permanence)
    self.assertAlmostEqual(.3, tm.connections.dataForSynapse(is1).permanence)
  def testNoChangeToMatchingSegmentsInPredictedActiveColumn(self):
    """In a correctly predicted column, matching segments that did not make
    the prediction (whether on the predicted cell or on another cell of the
    column) do not learn.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=3,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.0,
      seed=42)
    previousActiveColumns = [0]
    activeColumns = [1]
    previousActiveCells = [0,1,2,3]
    expectedActiveCells = [4]
    otherburstingCells = [5,6,7]
    # Active segment: four connected synapses, enough to predict cell 4.
    activeSegment = tm.createSegment(expectedActiveCells[0])
    tm.connections.createSynapse(activeSegment, previousActiveCells[0], .5)
    tm.connections.createSynapse(activeSegment, previousActiveCells[1], .5)
    tm.connections.createSynapse(activeSegment, previousActiveCells[2], .5)
    tm.connections.createSynapse(activeSegment, previousActiveCells[3], .5)
    # Sub-threshold (matching) segments: permanence .3 is below
    # connectedPermanence and two synapses are below activationThreshold.
    matchingSegmentOnSameCell = tm.createSegment(
      expectedActiveCells[0])
    s1 = tm.connections.createSynapse(matchingSegmentOnSameCell,
                                      previousActiveCells[0], .3)
    s2 = tm.connections.createSynapse(matchingSegmentOnSameCell,
                                      previousActiveCells[1], .3)
    matchingSegmentOnOtherCell = tm.createSegment(
      otherburstingCells[0])
    s3 = tm.connections.createSynapse(matchingSegmentOnOtherCell,
                                      previousActiveCells[0], .3)
    s4 = tm.connections.createSynapse(matchingSegmentOnOtherCell,
                                      previousActiveCells[1], .3)
    tm.compute(previousActiveColumns, True)
    self.assertEqual(expectedActiveCells, tm.getPredictiveCells())
    tm.compute(activeColumns, True)
    # All matching-segment synapses keep their original .3 permanence.
    self.assertAlmostEqual(.3, tm.connections.dataForSynapse(s1).permanence)
    self.assertAlmostEqual(.3, tm.connections.dataForSynapse(s2).permanence)
    self.assertAlmostEqual(.3, tm.connections.dataForSynapse(s3).permanence)
    self.assertAlmostEqual(.3, tm.connections.dataForSynapse(s4).permanence)
  def testNoNewSegmentIfNotEnoughWinnerCells(self):
    """If the previous timestep had no active columns (hence no winner
    cells), a bursting column has nothing to connect to, so no segment is
    grown.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=3,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.0,
      seed=42)
    zeroColumns = []
    activeColumns = [0]
    tm.compute(zeroColumns, True)
    tm.compute(activeColumns, True)
    # No winner cells were available, so nothing was learned.
    self.assertEqual(0, tm.connections.numSegments())
  def testNewSegmentAddSynapsesToSubsetOfWinnerCells(self):
    """With maxNewSynapseCount=2 and three previous winner cells, a new
    segment grows synapses to just two of them, each at initialPermanence.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=2,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.0,
      seed=42)
    previousActiveColumns = [0, 1, 2]
    activeColumns = [4]
    tm.compute(previousActiveColumns, True)
    prevWinnerCells = tm.getWinnerCells() #[0, 8, 7]
    self.assertEqual(3, len(prevWinnerCells))
    tm.compute(activeColumns, True)
    winnerCells = tm.getWinnerCells() #[18]
    self.assertEqual(1, len(winnerCells))
    segments = list(tm.connections.segmentsForCell(winnerCells[0]))
    self.assertEqual(1, len(segments))
    synapses = list(tm.connections.synapsesForSegment(segments[0]))
    # Only maxNewSynapseCount (2) synapses were grown, each at
    # initialPermanence and each to one of the previous winner cells.
    self.assertEqual(2, len(synapses))
    for synapse in synapses:
      synapseData = tm.connections.dataForSynapse(synapse)
      self.assertAlmostEqual(.21, synapseData.permanence)
      self.assertTrue(synapseData.presynapticCell in prevWinnerCells)
def testNewSegmentAddSynapsesToAllWinnerCells(self):
tm = TemporalMemory(
columnDimensions=[32],
cellsPerColumn=4,
activationThreshold=3,
initialPermanence=.21,
connectedPermanence=.50,
minThreshold=2,
maxNewSynapseCount=4,
permanenceIncrement=.10,
permanenceDecrement=.10,
predictedSegmentDecrement=0.0,
seed=42)
previousActiveColumns = [0, 1, 2]
activeColumns = [4]
tm.compute(previousActiveColumns)
prevWinnerCells = sorted(tm.getWinnerCells())
self.assertEqual(3, len(prevWinnerCells))
tm.compute(activeColumns)
winnerCells = tm.getWinnerCells()
self.assertEqual(1, len(winnerCells))
segments = list(tm.connections.segmentsForCell(winnerCells[0]))
self.assertEqual(1, len(segments))
synapses = list(tm.connections.synapsesForSegment(segments[0]))
self.assertEqual(3, len(synapses))
presynapticCells = []
for synapse in synapses:
synapseData = tm.connections.dataForSynapse(synapse)
self.assertAlmostEqual(.21, synapseData.permanence)
presynapticCells.append(synapseData.presynapticCell)
presynapticCells = sorted(presynapticCells)
self.assertEqual(prevWinnerCells, presynapticCells)
  def testMatchingSegmentAddSynapsesToSubsetOfWinnerCells(self):
    """A matching segment grows new synapses to previous winner cells; here
    the segment ends up with 3 synapses (maxNewSynapseCount=3): the existing
    one plus two new ones.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=1,
      activationThreshold=3,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=1,
      maxNewSynapseCount=3,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.0,
      seed=42)
    # One cell per column makes the winner cells deterministic.
    previousActiveColumns = [0, 1, 2, 3]
    prevWinnerCells = [0, 1, 2, 3]
    activeColumns = [4]
    matchingSegment = tm.createSegment(4)
    tm.connections.createSynapse(matchingSegment, 0, .5)
    tm.compute(previousActiveColumns, True)
    self.assertEqual(prevWinnerCells, tm.getWinnerCells())
    tm.compute(activeColumns, True)
    synapses = tm.connections.synapsesForSegment(matchingSegment)
    self.assertEqual(3, len(synapses))
    for synapse in synapses:
      synapseData = tm.connections.dataForSynapse(synapse)
      if synapseData.presynapticCell != 0:
        # New synapses start at initialPermanence and connect to one of the
        # remaining winner cells.
        self.assertAlmostEqual(.21, synapseData.permanence)
        self.assertTrue(synapseData.presynapticCell == prevWinnerCells[1] or
                        synapseData.presynapticCell == prevWinnerCells[2] or
                        synapseData.presynapticCell == prevWinnerCells[3])
def testMatchingSegmentAddSynapsesToAllWinnerCells(self):
tm = TemporalMemory(
columnDimensions=[32],
cellsPerColumn=1,
activationThreshold=3,
initialPermanence=.21,
connectedPermanence=.50,
minThreshold=1,
maxNewSynapseCount=3,
permanenceIncrement=.10,
permanenceDecrement=.10,
predictedSegmentDecrement=0.0,
seed=42)
previousActiveColumns = [0, 1]
prevWinnerCells = [0, 1]
activeColumns = [4]
matchingSegment = tm.createSegment(4)
tm.connections.createSynapse(matchingSegment, 0, .5)
tm.compute(previousActiveColumns, True)
self.assertEqual(prevWinnerCells, tm.getWinnerCells())
tm.compute(activeColumns)
synapses = tm.connections.synapsesForSegment(matchingSegment)
self.assertEqual(2, len(synapses))
for synapse in synapses:
synapseData = tm.connections.dataForSynapse(synapse)
if synapseData.presynapticCell != 0:
self.assertAlmostEqual(.21, synapseData.permanence)
self.assertEqual(prevWinnerCells[1], synapseData.presynapticCell)
  def testActiveSegmentGrowSynapsesAccordingToPotentialOverlap(self):
    """
    When a segment becomes active, grow synapses to previous winner cells.
    The number of grown synapses is calculated from the "matching segment"
    overlap, not the "active segment" overlap.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=1,
      activationThreshold=2,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=1,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.0,
      seed=42)
    # Use 1 cell per column so that we have easy control over the winner cells.
    previousActiveColumns = [0, 1, 2, 3, 4]
    prevWinnerCells = [0, 1, 2, 3, 4]
    activeColumns = [5]
    # Two connected synapses (active overlap 2 = activationThreshold) plus
    # one synapse below connectedPermanence (potential overlap 3).
    activeSegment = tm.createSegment(5)
    tm.connections.createSynapse(activeSegment, 0, .5)
    tm.connections.createSynapse(activeSegment, 1, .5)
    tm.connections.createSynapse(activeSegment, 2, .2)
    tm.compute(previousActiveColumns, True)
    self.assertEqual(prevWinnerCells, tm.getWinnerCells())
    tm.compute(activeColumns, True)
    # maxNewSynapseCount(4) - potential overlap(3) = 1 new synapse, grown to
    # either cell 3 or cell 4 (random choice).
    presynapticCells = set(synapse.presynapticCell for synapse in
                           tm.connections.synapsesForSegment(activeSegment))
    self.assertTrue(presynapticCells == set([0, 1, 2, 3]) or
                    presynapticCells == set([0, 1, 2, 4]))
  def testDestroyWeakSynapseOnWrongPrediction(self):
    """Punishing a wrongly predicting segment destroys any synapse whose
    permanence is driven to zero or below.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.2,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.02,
      seed=42)
    previousActiveColumns = [0]
    previousActiveCells = [0, 1, 2, 3]
    # Column 2 becomes active, so predicting cell 5 (column 1) was wrong.
    activeColumns = [2]
    expectedActiveCells = [5]
    activeSegment = tm.createSegment(expectedActiveCells[0])
    tm.connections.createSynapse(activeSegment, previousActiveCells[0], .5)
    tm.connections.createSynapse(activeSegment, previousActiveCells[1], .5)
    tm.connections.createSynapse(activeSegment, previousActiveCells[2], .5)
    # Weak synapse.
    tm.connections.createSynapse(activeSegment, previousActiveCells[3], .015)
    tm.compute(previousActiveColumns, True)
    tm.compute(activeColumns, True)
    # The weak synapse's permanence fell to <= 0, so only 3 remain.
    self.assertEqual(3, tm.connections.numSynapses(activeSegment))
  def testDestroyWeakSynapseOnActiveReinforce(self):
    """A synapse so weak that a permanence decrement drives it to zero is
    destroyed when its segment learns.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.2,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.02,
      seed=42)
    previousActiveColumns = [0]
    previousActiveCells = [0, 1, 2, 3]
    activeColumns = [2]
    activeCell = 5
    activeSegment = tm.createSegment(activeCell)
    tm.connections.createSynapse(activeSegment, previousActiveCells[0], .5)
    tm.connections.createSynapse(activeSegment, previousActiveCells[1], .5)
    tm.connections.createSynapse(activeSegment, previousActiveCells[2], .5)
    # Weak inactive synapse.
    tm.connections.createSynapse(activeSegment, previousActiveCells[3], .009)
    tm.compute(previousActiveColumns, True)
    tm.compute(activeColumns, True)
    # The .009 synapse was decremented to <= 0 and destroyed.
    self.assertEqual(3, tm.connections.numSynapses(activeSegment))
  def testRecycleWeakestSynapseToMakeRoomForNewSynapse(self):
    """When a segment is at maxSynapsesPerSegment, growing new synapses
    recycles existing ones rather than exceeding the cap.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=1,
      activationThreshold=3,
      initialPermanence=.21,
      connectedPermanence=.50,
      minThreshold=1,
      maxNewSynapseCount=3,
      permanenceIncrement=.02,
      permanenceDecrement=.02,
      predictedSegmentDecrement=0.0,
      seed=42,
      maxSynapsesPerSegment=4)
    prevActiveColumns = [1, 2, 3]
    prevWinnerCells = [1, 2, 3]
    activeColumns = [4]
    matchingSegment = tm.createSegment(4)
    tm.connections.createSynapse(matchingSegment, 81, .6)
    # Create a weak synapse. Make sure it's not so weak that permanenceIncrement
    # destroys it.
    tm.connections.createSynapse(matchingSegment, 0, .11)
    # Create a synapse that will match.
    tm.connections.createSynapse(matchingSegment, 1, .20)
    # Create a synapse with a high permanence
    tm.connections.createSynapse(matchingSegment, 31, .60)
    tm.compute(prevActiveColumns)
    self.assertEqual(prevWinnerCells, tm.getWinnerCells())
    tm.compute(activeColumns)
    # The segment is still at its cap of 4 synapses, but now connects to
    # winner cells 2 and 3 in place of recycled synapses.
    synapses = tm.connections.synapsesForSegment(matchingSegment)
    self.assertEqual(4, len(synapses))
    presynapticCells = set(synapse.presynapticCell for synapse in synapses)
    self.assertEqual(set([1, 2, 3, 31]), presynapticCells)
  def testRecycleLeastRecentlyActiveSegmentToMakeRoomForNewSegment(self):
    """When a cell is at maxSegmentsPerCell, growing a new segment recycles
    an old segment instead of exceeding the cap.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=1,
      activationThreshold=3,
      initialPermanence=.50,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=3,
      permanenceIncrement=.02,
      permanenceDecrement=.02,
      predictedSegmentDecrement=0.0,
      seed=42,
      maxSegmentsPerCell=2)
    prevActiveColumns1 = [0, 1, 2]
    prevActiveColumns2 = [3, 4, 5]
    prevActiveColumns3 = [6, 7, 8]
    activeColumns = [9]
    # First pattern: cell 9 grows its first segment.
    tm.compute(prevActiveColumns1)
    tm.compute(activeColumns)
    self.assertEqual(1, tm.connections.numSegments(9))
    oldestSegment = list(tm.connections.segmentsForCell(9))[0]
    # Second pattern: cell 9 reaches the cap of 2 segments.
    tm.reset()
    tm.compute(prevActiveColumns2)
    tm.compute(activeColumns)
    self.assertEqual(2, tm.connections.numSegments(9))
    oldPresynaptic = \
      set(synapse.presynapticCell
          for synapse in tm.connections.synapsesForSegment(oldestSegment))
    # Third pattern: a new segment is needed; the count must stay at 2.
    tm.reset()
    tm.compute(prevActiveColumns3)
    tm.compute(activeColumns)
    self.assertEqual(2, tm.connections.numSegments(9))
    # Verify none of the segments are connected to the cells the old
    # segment was connected to.
    for segment in tm.connections.segmentsForCell(9):
      newPresynaptic = set(synapse.presynapticCell
                           for synapse
                           in tm.connections.synapsesForSegment(segment))
      self.assertEqual([], list(oldPresynaptic & newPresynaptic))
  def testDestroySegmentsWithTooFewSynapsesToBeMatching(self):
    """A punished segment that loses all of its synapses (each permanence
    driven to <= 0) is itself destroyed.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.2,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.02,
      seed=42)
    prevActiveColumns = [0]
    prevActiveCells = [0, 1, 2, 3]
    # Column 2 becomes active, so the matching segment on cell 5 (column 1)
    # was wrong and gets punished.
    activeColumns = [2]
    expectedActiveCell = 5
    matchingSegment = tm.createSegment(expectedActiveCell)
    # All synapses are weak enough that predictedSegmentDecrement (.02)
    # drives them to <= 0.
    tm.connections.createSynapse(matchingSegment, prevActiveCells[0], .015)
    tm.connections.createSynapse(matchingSegment, prevActiveCells[1], .015)
    tm.connections.createSynapse(matchingSegment, prevActiveCells[2], .015)
    tm.connections.createSynapse(matchingSegment, prevActiveCells[3], .015)
    tm.compute(prevActiveColumns, True)
    tm.compute(activeColumns, True)
    self.assertEqual(0, tm.connections.numSegments(expectedActiveCell))
  def testPunishMatchingSegmentsInInactiveColumns(self):
    """Segments in columns that do not become active are punished: synapses
    to previously active cells lose predictedSegmentDecrement; synapses to
    inactive cells are untouched.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.2,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.02,
      seed=42)
    previousActiveColumns = [0]
    previousActiveCells = [0, 1, 2, 3]
    activeColumns = [1]
    previousInactiveCell = 81
    # Active segment on cell 42 — its column never becomes active.
    activeSegment = tm.createSegment(42)
    as1 = tm.connections.createSynapse(activeSegment,
                                       previousActiveCells[0], .5)
    as2 = tm.connections.createSynapse(activeSegment,
                                       previousActiveCells[1], .5)
    as3 = tm.connections.createSynapse(activeSegment,
                                       previousActiveCells[2], .5)
    is1 = tm.connections.createSynapse(activeSegment,
                                       previousInactiveCell, .5)
    # Matching segment on cell 43 — also in a column that stays inactive.
    matchingSegment = tm.createSegment(43)
    as4 = tm.connections.createSynapse(matchingSegment,
                                       previousActiveCells[0], .5)
    as5 = tm.connections.createSynapse(matchingSegment,
                                       previousActiveCells[1], .5)
    is2 = tm.connections.createSynapse(matchingSegment,
                                       previousInactiveCell, .5)
    tm.compute(previousActiveColumns, True)
    tm.compute(activeColumns, True)
    # Punished: .5 - predictedSegmentDecrement(.02) = .48.
    self.assertAlmostEqual(.48, tm.connections.dataForSynapse(as1).permanence)
    self.assertAlmostEqual(.48, tm.connections.dataForSynapse(as2).permanence)
    self.assertAlmostEqual(.48, tm.connections.dataForSynapse(as3).permanence)
    self.assertAlmostEqual(.48, tm.connections.dataForSynapse(as4).permanence)
    self.assertAlmostEqual(.48, tm.connections.dataForSynapse(as5).permanence)
    # Synapses to the previously inactive cell keep their .50 permanence.
    self.assertAlmostEqual(.50, tm.connections.dataForSynapse(is1).permanence)
    self.assertAlmostEqual(.50, tm.connections.dataForSynapse(is2).permanence)
  def testAddSegmentToCellWithFewestSegments(self):
    """A bursting column grows its new segment on a cell with the fewest
    segments.  Here cells 1 and 2 are tied with zero segments; over 100
    random seeds each of them must be chosen at least once.
    """
    grewOnCell1 = False
    grewOnCell2 = False
    for seed in xrange(100):
      tm = TemporalMemory(
        columnDimensions=[32],
        cellsPerColumn=4,
        activationThreshold=3,
        initialPermanence=.2,
        connectedPermanence=.50,
        minThreshold=2,
        maxNewSynapseCount=4,
        permanenceIncrement=.10,
        permanenceDecrement=.10,
        predictedSegmentDecrement=0.02,
        seed=seed)
      prevActiveColumns = [1, 2, 3, 4]
      activeColumns = [0]
      prevActiveCells = [4, 5, 6, 7]
      # Cells 0 and 3 already carry one (non-matching) segment each; cells 1
      # and 2 have none and are the tied candidates for growth.
      nonMatchingCells = [0, 3]
      activeCells = [0, 1, 2, 3]
      segment1 = tm.createSegment(nonMatchingCells[0])
      tm.connections.createSynapse(segment1, prevActiveCells[0], .5)
      segment2 = tm.createSegment(nonMatchingCells[1])
      tm.connections.createSynapse(segment2, prevActiveCells[1], .5)
      tm.compute(prevActiveColumns, True)
      tm.compute(activeColumns, True)
      self.assertEqual(activeCells, tm.getActiveCells())
      # Exactly one new segment was added; the pre-existing ones are intact.
      self.assertEqual(3, tm.connections.numSegments())
      self.assertEqual(1, tm.connections.numSegments(0))
      self.assertEqual(1, tm.connections.numSegments(3))
      self.assertEqual(1, tm.connections.numSynapses(segment1))
      self.assertEqual(1, tm.connections.numSynapses(segment2))
      segments = list(tm.connections.segmentsForCell(1))
      if len(segments) == 0:
        # The new segment must then be on cell 2.
        segments2 = list(tm.connections.segmentsForCell(2))
        self.assertFalse(len(segments2) == 0)
        grewOnCell2 = True
        segments.append(segments2[0])
      else:
        grewOnCell1 = True
      self.assertEqual(1, len(segments))
      synapses = list(tm.connections.synapsesForSegment(segments[0]))
      self.assertEqual(4, len(synapses))
      # The new segment connects once to each previously active column, all
      # synapses starting at initialPermanence.
      columnChecklist = set(prevActiveColumns)
      for synapse in synapses:
        synapseData = tm.connections.dataForSynapse(synapse)
        self.assertAlmostEqual(.2, synapseData.permanence)
        column = tm.columnForCell(synapseData.presynapticCell)
        self.assertTrue(column in columnChecklist)
        columnChecklist.remove(column)
      self.assertTrue(len(columnChecklist) == 0)
    self.assertTrue(grewOnCell1)
    self.assertTrue(grewOnCell2)
  def testConnectionsNeverChangeWhenLearningDisabled(self):
    """With learn=False, compute() must leave the connections (segments and
    synapse permanences) completely unchanged — whether the column was
    predicted, bursting, or wrongly predicted.
    """
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.2,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.02,
      seed=42)
    prevActiveColumns = [0]
    prevActiveCells = [0, 1, 2, 3]
    activeColumns = [1, 2] #1 is predicted, 2 is bursting
    prevInactiveCell = 81
    expectedActiveCells = [4]
    correctActiveSegment = tm.createSegment(expectedActiveCells[0])
    tm.connections.createSynapse(correctActiveSegment, prevActiveCells[0], .5)
    tm.connections.createSynapse(correctActiveSegment, prevActiveCells[1], .5)
    tm.connections.createSynapse(correctActiveSegment, prevActiveCells[2], .5)
    wrongMatchingSegment = tm.createSegment(43)
    tm.connections.createSynapse(wrongMatchingSegment, prevActiveCells[0], .5)
    tm.connections.createSynapse(wrongMatchingSegment, prevActiveCells[1], .5)
    tm.connections.createSynapse(wrongMatchingSegment, prevInactiveCell, .5)
    # Snapshot the connections, run two non-learning steps, and compare.
    before = copy.deepcopy(tm.connections)
    tm.compute(prevActiveColumns, False)
    tm.compute(activeColumns, False)
    self.assertEqual(before, tm.connections)
def testDestroySegmentsThenReachLimit(self):
""" Destroy some segments then verify that the maxSegmentsPerCell is still
correctly applied.
"""
tm = TemporalMemory(
columnDimensions=[32],
cellsPerColumn=1,
activationThreshold=3,
initialPermanence=.50,
connectedPermanence=.50,
minThreshold=2,
maxNewSynapseCount=3,
permanenceIncrement=.02,
permanenceDecrement=.02,
predictedSegmentDecrement=0.0,
seed=42,
maxSegmentsPerCell=2)
segment1 = tm.createSegment(11)
segment2 = tm.createSegment(11)
self.assertEqual(2, tm.connections.numSegments())
tm.connections.destroySegment(segment1)
tm.connections.destroySegment(segment2)
self.assertEqual(0, tm.connections.numSegments())
tm.createSegment(11)
self.assertEqual(1, tm.connections.numSegments())
tm.createSegment(11)
self.assertEqual(2, tm.connections.numSegments())
segment3 = tm.createSegment(11)
self.assertEqual(2, tm.connections.numSegments(11))
self.assertEqual(2, tm.connections.numSegments())
def testReachSegmentLimitMultipleTimes(self):
""" Hit the maxSegmentsPerCell threshold multiple times. Make sure it
works more than once.
"""
tm = TemporalMemory(
columnDimensions=[32],
cellsPerColumn=1,
activationThreshold=3,
initialPermanence=.50,
connectedPermanence=.50,
minThreshold=2,
maxNewSynapseCount=3,
permanenceIncrement=.02,
permanenceDecrement=.02,
predictedSegmentDecrement=0.0,
seed=42,
maxSegmentsPerCell=2)
tm.createSegment(10)
self.assertEqual(1, tm.connections.numSegments())
tm.createSegment(10)
self.assertEqual(2, tm.connections.numSegments())
tm.createSegment(10)
self.assertEqual(2, tm.connections.numSegments())
tm.createSegment(10)
self.assertEqual(2, tm.connections.numSegments())
def testColumnForCell1D(self):
tm = TemporalMemory(
columnDimensions=[2048],
cellsPerColumn=5
)
self.assertEqual(tm.columnForCell(0), 0)
self.assertEqual(tm.columnForCell(4), 0)
self.assertEqual(tm.columnForCell(5), 1)
self.assertEqual(tm.columnForCell(10239), 2047)
def testColumnForCell2D(self):
tm = TemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
self.assertEqual(tm.columnForCell(0), 0)
self.assertEqual(tm.columnForCell(3), 0)
self.assertEqual(tm.columnForCell(4), 1)
self.assertEqual(tm.columnForCell(16383), 4095)
def testColumnForCellInvalidCell(self):
tm = TemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
try:
tm.columnForCell(16383)
except IndexError:
self.fail("IndexError raised unexpectedly")
args = [16384]
self.assertRaises(IndexError, tm.columnForCell, *args)
args = [-1]
self.assertRaises(IndexError, tm.columnForCell, *args)
def testCellsForColumn1D(self):
tm = TemporalMemory(
columnDimensions=[2048],
cellsPerColumn=5
)
expectedCells = [5, 6, 7, 8, 9]
self.assertEqual(tm.cellsForColumn(1), expectedCells)
def testCellsForColumn2D(self):
tm = TemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
expectedCells = [256, 257, 258, 259]
self.assertEqual(tm.cellsForColumn(64), expectedCells)
def testCellsForColumnInvalidColumn(self):
tm = TemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
try:
tm.cellsForColumn(4095)
except IndexError:
self.fail("IndexError raised unexpectedly")
args = [4096]
self.assertRaises(IndexError, tm.cellsForColumn, *args)
args = [-1]
self.assertRaises(IndexError, tm.cellsForColumn, *args)
def testNumberOfColumns(self):
tm = TemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=32
)
self.assertEqual(tm.numberOfColumns(), 64 * 64)
def testNumberOfCells(self):
tm = TemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=32
)
self.assertEqual(tm.numberOfCells(), 64 * 64 * 32)
def testMapCellsToColumns(self):
tm = TemporalMemory(
columnDimensions=[100],
cellsPerColumn=4
)
columnsForCells = tm.mapCellsToColumns(set([0, 1, 2, 5, 399]))
self.assertEqual(columnsForCells[0], set([0, 1, 2]))
self.assertEqual(columnsForCells[1], set([5]))
self.assertEqual(columnsForCells[99], set([399]))
def testMaxSegmentsPerCellGetter(self):
tm = TemporalMemory(
columnDimensions=[64,64],
cellsPerColumn=32,
maxSegmentsPerCell=200
)
self.assertEqual(tm.getMaxSegmentsPerCell(), 200)
def testMaxSynapsesPerSegmentGetter(self):
tm = TemporalMemory(
columnDimensions=[32,32],
cellsPerColumn=16,
maxSynapsesPerSegment=150
)
self.assertEqual(tm.getMaxSynapsesPerSegment(), 150)
  def serializationTestPrepare(self, tm):
    """Put `tm` into a known state for the serialization tests: one active
    segment (cell 4) and two matching segments (cells 8 and 9), after one
    compute step on column 0.  A segment and a synapse are destroyed along
    the way to exercise the serialization bookkeeping.
    """
    # Create an active segment and two matching segments.
    # Destroy a few to exercise the code.
    destroyMe1 = tm.createSegment(4)
    tm.connections.destroySegment(destroyMe1)
    activeSegment = tm.createSegment(4)
    tm.connections.createSynapse(activeSegment, 0, 0.5)
    tm.connections.createSynapse(activeSegment, 1, 0.5)
    destroyMe2 = tm.connections.createSynapse(activeSegment, 42, 0.5)
    tm.connections.destroySynapse(destroyMe2)
    tm.connections.createSynapse(activeSegment, 2, 0.5)
    tm.connections.createSynapse(activeSegment, 3, 0.5)
    matchingSegment1 = tm.createSegment(8)
    tm.connections.createSynapse(matchingSegment1, 0, 0.4)
    tm.connections.createSynapse(matchingSegment1, 1, 0.4)
    tm.connections.createSynapse(matchingSegment1, 2, 0.4)
    matchingSegment2 = tm.createSegment(9)
    tm.connections.createSynapse(matchingSegment2, 0, 0.4)
    tm.connections.createSynapse(matchingSegment2, 1, 0.4)
    tm.connections.createSynapse(matchingSegment2, 2, 0.4)
    tm.connections.createSynapse(matchingSegment2, 3, 0.4)
    tm.compute([0])
    # Sanity check the prepared state before it gets serialized.
    self.assertEqual(len(tm.getActiveSegments()), 1)
    self.assertEqual(len(tm.getMatchingSegments()), 3)
  def serializationTestVerify(self, tm):
    """Run one more compute step on a TM prepared by serializationTestPrepare
    (possibly after a serialization round trip) and verify it behaves exactly
    as the original would.
    """
    # Activate 3 columns. One has an active segment, one has two matching
    # segments, and one has none. One column should be predicted, the others
    # should burst, there should be four segments total, and they should have
    # the correct permanences and synapse counts.
    prevWinnerCells = tm.getWinnerCells()
    self.assertEqual(len(prevWinnerCells), 1)
    tm.compute([1, 2, 3])
    # Verify the correct cells were activated.
    self.assertEqual(tm.getActiveCells(),
                     [4, 8, 9, 10, 11, 12, 13, 14, 15])
    winnerCells = tm.getWinnerCells()
    self.assertEqual(len(winnerCells), 3)
    self.assertEqual(winnerCells[0], 4)
    self.assertEqual(winnerCells[1], 9)
    self.assertEqual(tm.connections.numSegments(), 4)
    # Verify the active segment learned.
    self.assertEqual(tm.connections.numSegments(4), 1)
    activeSegment = tm.connections.segmentsForCell(4)[0]
    syns1 = tm.connections.synapsesForSegment(activeSegment)
    self.assertEqual(set([0, 1, 2, 3]),
                     set(s.presynapticCell for s in syns1))
    for s in syns1:
      # Reinforced: 0.5 + 0.1 = 0.6.
      self.assertAlmostEqual(s.permanence, 0.6)
    # Verify the non-best matching segment is unchanged.
    self.assertEqual(tm.connections.numSegments(8), 1)
    matchingSegment1 = tm.connections.segmentsForCell(8)[0]
    syns2 = tm.connections.synapsesForSegment(matchingSegment1)
    self.assertEqual(set([0, 1, 2]),
                     set(s.presynapticCell for s in syns2))
    for s in syns2:
      self.assertAlmostEqual(s.permanence, 0.4)
    # Verify the best matching segment learned.
    self.assertEqual(tm.connections.numSegments(9), 1)
    matchingSegment2 = tm.connections.segmentsForCell(9)[0]
    syns3 = tm.connections.synapsesForSegment(matchingSegment2)
    self.assertEqual(set([0, 1, 2, 3]),
                     set(s.presynapticCell for s in syns3))
    for s in syns3:
      # Reinforced: 0.4 + 0.1 = 0.5.
      self.assertAlmostEqual(s.permanence, 0.5)
    # Verify the winner cell in the last column grew a segment.
    winnerCell = winnerCells[2]
    self.assertGreaterEqual(winnerCell, 12)
    self.assertLess(winnerCell, 16)
    self.assertEqual(tm.connections.numSegments(winnerCell), 1)
    newSegment = tm.connections.segmentsForCell(winnerCell)[0]
    syns4 = tm.connections.synapsesForSegment(newSegment)
    self.assertEqual(set([prevWinnerCells[0]]),
                     set(s.presynapticCell for s in syns4))
    for s in syns4:
      # New synapses start at initialPermanence (0.21).
      self.assertAlmostEqual(s.permanence, 0.21)
  @unittest.skipUnless(
    capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    """Round-trip a TM through capnp serialization (via a temp file) and
    verify the deserialized copy equals the original and behaves identically.
    """
    tm1 = TemporalMemory(
      columnDimensions=(32,),
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=0.21,
      connectedPermanence=0.50,
      minThreshold=2,
      maxNewSynapseCount=3,
      permanenceIncrement=0.1,
      permanenceDecrement=0.1,
      predictedSegmentDecrement=0.0,
      seed=42
    )
    self.serializationTestPrepare(tm1)
    proto1 = TemporalMemoryProto_capnp.TemporalMemoryProto.new_message()
    tm1.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = TemporalMemoryProto_capnp.TemporalMemoryProto.read(f)
    # Load the deserialized proto
    tm2 = TemporalMemory.read(proto2)
    self.assertEqual(tm1, tm2)
    # The copy must keep behaving exactly like the original.
    self.serializationTestVerify(tm2)
  @unittest.skip("Manually enable this when you want to use it.")
  def testWriteTestFile(self):
    """Write a serialized TM to a fixture file on disk.  Intended to be run
    by hand, paired with testReadTestFile (e.g. for cross-version checks).
    """
    tm = TemporalMemory(
      columnDimensions=(32,),
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=0.21,
      connectedPermanence=0.50,
      minThreshold=2,
      maxNewSynapseCount=3,
      permanenceIncrement=0.1,
      permanenceDecrement=0.1,
      predictedSegmentDecrement=0.0,
      seed=42
    )
    self.serializationTestPrepare(tm)
    proto = TemporalMemoryProto_capnp.TemporalMemoryProto.new_message()
    tm.write(proto)
    with open("TemporalMemorySerializationWrite.tmp", "w") as f:
      proto.write(f)
  @unittest.skip("Manually enable this when you want to use it.")
  def testReadTestFile(self):
    """Read the fixture written by testWriteTestFile and verify its behavior.
    Intended to be run by hand.
    """
    with open("TemporalMemorySerializationWrite.tmp", "r") as f:
      proto = TemporalMemoryProto_capnp.TemporalMemoryProto.read(f)
    # Load the deserialized proto
    tm = TemporalMemory.read(proto)
    self.serializationTestVerify(tm)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
  unittest.main()
| 41,868 | Python | .py | 1,012 | 34.632411 | 80 | 0.717704 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,096 | spatial_pooler_boost_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/spatial_pooler_boost_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import numpy
import unittest2 as unittest
from nupic.support.unittesthelpers.algorithm_test_helpers \
import CreateSP
from nupic.bindings.math import GetNTAReal
# numpy dtype string used for binary input/output arrays throughout this test.
uintType = "uint32"

# set a single seed for running both implementations
# NOTE: the seed is derived from wall-clock time, so each test run uses a
# different seed; setUp() prints it so a failing run can be reproduced.
SEED = int((time.time()%10000)*10)
def _computeOverlap(x, y):
"""
Given two binary arrays, compute their overlap. The overlap is the number
of bits where x[i] and y[i] are both 1
"""
return ((x + y) == 2).sum()
def _areAllSDRsUnique(sdrDict):
"""Return True iff all the SDR's in the dict are unique."""
for k1, v1 in sdrDict.iteritems():
for k2, v2 in sdrDict.iteritems():
# Return false if two different keys have identical SDR's
if (k2 != k1) and ((v1 == v2).sum() == v1.size):
return False
return True
class SpatialPoolerBoostTest(unittest.TestCase):
  """
  Test boosting.

  The test is constructed as follows: we construct a set of 5 known inputs. Two
  of the input patterns have 50% overlap while all other combinations have 0%
  overlap. Each input pattern has 20 bits on to ensure reasonable overlap with
  almost all columns.

  SP parameters: The SP is set to have 600 columns with 10% output sparsity.
  This ensures that the 5 inputs cannot use up all the columns. Yet we still can
  have a reasonable number of winning columns at each step in order to test
  overlap properties. boostStrength is set to 10 so that some boosted columns are
  guaranteed to win eventually but not necessarily quickly. potentialPct is set
  to 0.9 to ensure all columns have at least some overlap with at least one
  input bit. Thus, when sufficiently boosted, every column should become a
  winner at some point. We set permanence increment and decrement to 0 so that
  winning columns don't change unless they have been boosted.

  Learning is OFF for Phase 1 & 4 and ON for Phase 2 & 3

  Phase 1: Run spatial pooler on the dataset with learning off to get a baseline
  The boosting factors should be all ones in this phase. A significant fraction
  of the columns will not be used at all. There will be significant overlap
  between the first two inputs.

  Phase 2: Learning is on over the next 10 iterations. During this phase,
  columns that are active frequently will have low boost factors, and columns
  that are not active enough will have high boost factors. All columns should
  be active at some point in phase 2.

  Phase 3: Run one more batch on with learning On. Because of the artificially
  induced thrashing behavior in this test due to boosting, all the inputs should
  now have pretty distinct patterns.

  Phase 4: Run spatial pooler with learning off. Make sure boosting factors
  do not change when learning is off
  """

  def setUp(self):
    """
    Set various constants. Create the input patterns and the spatial pooler
    """
    self.inputSize = 90
    self.columnDimensions = 600

    # Create a set of input vectors, x
    # B,C,D don't overlap at all with other patterns
    self.x = numpy.zeros((5, self.inputSize), dtype=uintType)
    self.x[0, 0:20] = 1   # Input pattern A
    self.x[1, 10:30] = 1  # Input pattern A' (half the bits overlap with A)
    self.x[2, 30:50] = 1  # Input pattern B (no overlap with others)
    self.x[3, 50:70] = 1  # Input pattern C (no overlap with others)
    self.x[4, 70:90] = 1  # Input pattern D (no overlap with others)

    # For each column, this will contain the last iteration number where that
    # column was a winner
    self.winningIteration = numpy.zeros(self.columnDimensions)

    # For each input vector i, lastSDR[i] contains the most recent SDR output
    # by the SP.
    self.lastSDR = {}

    self.spImplementation = "None"

    self.sp = None

    # Setup the SP creation parameters we will use
    self.params = {
      'inputDimensions': [self.inputSize],
      'columnDimensions': [self.columnDimensions],
      'potentialRadius': self.inputSize,
      'potentialPct': 0.9,
      'globalInhibition': True,
      'numActiveColumnsPerInhArea': 60,
      'synPermActiveInc': 0.0,
      'synPermInactiveDec': 0.0,
      'dutyCyclePeriod': 10,
      'boostStrength': 10.0,
      'seed': SEED,
    }
    # SEED is time-derived; print it so failing runs can be reproduced.
    print "SP seed set to:", self.params['seed']

  def debugPrint(self):
    """
    Helpful debug print statements while debugging this test.
    """
    activeDutyCycle = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
    self.sp.getActiveDutyCycles(activeDutyCycle)

    boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
    self.sp.getBoostFactors(boost)

    print "\n--------- ITERATION", (
        self.sp.getIterationNum() ),"-----------------------"
    print "SP implementation:", self.spImplementation
    # NOTE(review): trailing comma prints the label with no value after it.
    print "Learning iteration:",
    print "Max/min active duty cycle:", (
        activeDutyCycle.max(), activeDutyCycle.min() )
    print "Average non-zero active duty cycle:", (
        activeDutyCycle[activeDutyCycle>0].mean() )
    print "Active duty cycle", activeDutyCycle
    print
    print "Boost factor for sp:", boost
    print
    print "Last winning iteration for each column"
    print self.winningIteration
    print "Number of columns that have won at some point:", (
        self.columnDimensions - (self.winningIteration==0).sum() )

  def verifySDRProperties(self):
    """
    Verify that all SDRs have the properties desired for this test.
    The bounds for checking overlap are set fairly loosely here since there is
    some variance due to randomness and the artificial parameters used in this
    test.
    """
    # Verify that all SDR's are unique
    self.assertTrue(_areAllSDRsUnique(self.lastSDR), "All SDR's are not unique")

    # Verify that the first two SDR's have some overlap (patterns A and A'
    # share half their input bits).
    self.assertGreater(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 9,
                       "First two SDR's don't overlap much")

    # Verify the last three SDR's have low overlap with everyone else.
    for i in [2, 3, 4]:
      for j in range(5):
        if (i!=j):
          self.assertLess(_computeOverlap(self.lastSDR[i], self.lastSDR[j]),
                          18, "One of the last three SDRs has high overlap")

  def boostTestPhase1(self):
    # Phase 1: learning OFF baseline; boost factors must all stay at 1.
    y = numpy.zeros(self.columnDimensions, dtype = uintType)

    # Do one batch through the input patterns while learning is Off
    for idx, v in enumerate(self.x):
      y.fill(0)
      self.sp.compute(v, False, y)
      self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
      self.lastSDR[idx] = y.copy()

    # The boost factor for all columns should be at 1.
    boost = numpy.zeros(self.columnDimensions, dtype = GetNTAReal())
    self.sp.getBoostFactors(boost)
    self.assertEqual((boost==1).sum(), self.columnDimensions,
                     "Boost factors are not all 1")

    # At least half of the columns should have never been active.
    self.assertGreaterEqual((self.winningIteration==0).sum(),
        self.columnDimensions/2, "More than half of the columns have been active")

    self.verifySDRProperties()

  def boostTestPhase2(self):
    # Phase 2: learning ON; boosting should force every column to win at
    # least once over these batches.
    y = numpy.zeros(self.columnDimensions, dtype = uintType)

    # Do 10 training batches through the input patterns
    for _ in range(10):
      for idx, v in enumerate(self.x):
        y.fill(0)
        self.sp.compute(v, True, y)
        self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
        self.lastSDR[idx] = y.copy()

    # All the never-active columns should have duty cycle of 0
    dutyCycles = numpy.zeros(self.columnDimensions, dtype = GetNTAReal())
    self.sp.getActiveDutyCycles(dutyCycles)
    self.assertEqual(dutyCycles[self.winningIteration == 0].sum(), 0,
                     "Inactive columns have positive duty cycle.")

    # Frequently-active columns get boost <= 1, rarely-active get boost >= 1.
    boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
    self.sp.getBoostFactors(boost)
    self.assertLessEqual(numpy.max(boost[numpy.where(dutyCycles>0.1)]), 1.0,
                         "Strongly active columns have high boost factors")
    self.assertGreaterEqual(numpy.min(boost[numpy.where(dutyCycles<0.1)]), 1.0,
                            "Weakly active columns have low boost factors")

    # By now, every column should have been sufficiently boosted to win at least
    # once. The number of columns that have never won should now be 0
    numLosersAfter = (self.winningIteration == 0).sum()
    self.assertEqual(numLosersAfter, 0)

    # Because of the artificially induced thrashing, even the first two patterns
    # should have low overlap. Verify that the first two SDR's now have little
    # overlap
    self.assertLess(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 7,
                    "First two SDR's overlap significantly when they "
                    "shouldn't")

  def boostTestPhase3(self):
    # Phase 3: one more learning batch; the Phase 2 properties must persist.
    y = numpy.zeros(self.columnDimensions, dtype = uintType)
    for idx, v in enumerate(self.x):
      y.fill(0)
      self.sp.compute(v, True, y)
      self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
      self.lastSDR[idx] = y.copy()

    # By now, every column should have been sufficiently boosted to win at least
    # once. The number of columns that have never won should now be 0
    numLosersAfter = (self.winningIteration==0).sum()
    self.assertEqual(numLosersAfter, 0)

    # Because of the artificially induced thrashing, even the first two patterns
    # should have low overlap. Verify that the first two SDR's now have little
    # overlap
    self.assertLess(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 7,
                    "First two SDR's overlap significantly when they "
                    "shouldn't")

  def boostTestPhase4(self):
    # Phase 4: learning OFF; boost factors must not change.
    boostAtBeg = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
    self.sp.getBoostFactors(boostAtBeg)

    # Do one more iteration through the input patterns with learning OFF
    y = numpy.zeros(self.columnDimensions, dtype=uintType)
    for _, v in enumerate(self.x):
      y.fill(0)
      self.sp.compute(v, False, y)

      # Check after every compute that the factors are untouched.
      boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
      self.sp.getBoostFactors(boost)
      self.assertEqual(boost.sum(), boostAtBeg.sum(),
                       "Boost factors changed when learning is off")

  def boostTestLoop(self, imp):
    """Main test loop: run all four phases against the given SP
    implementation ("py" or "cpp")."""
    self.sp = CreateSP(imp, self.params)
    self.spImplementation = imp
    self.winningIteration.fill(0)
    self.lastSDR = {}

    self.boostTestPhase1()
    self.boostTestPhase2()
    self.boostTestPhase3()
    self.boostTestPhase4()

  def testBoostingPY(self):
    self.boostTestLoop("py")

  def testBoostingCPP(self):
    self.boostTestLoop("cpp")
# Allow running this test module directly (python <file>.py).
if __name__ == "__main__":
  unittest.main()
| 11,879 | Python | .py | 246 | 42.686992 | 81 | 0.694112 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,097 | inhibition_object_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/inhibition_object_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test if the firing number of coincidences after inhibition equals spatial pooler
numActiveColumnsPerInhArea.
TODO: Fix this up to be more unit testy.
"""
import numpy
import unittest2 as unittest
from nupic.algorithms.spatial_pooler import SpatialPooler
numpy.random.seed(100)
class InhibitionObjectTest(unittest.TestCase):
  """Checks that the number of columns active after inhibition equals the
  SP's numActiveColumnsPerInhArea. Currently skipped (see decorator)."""

  @unittest.skip("Currently fails due to switch from FDRCSpatial2 to SpatialPooler."
                 "The new SP doesn't have explicit methods to get inhibition.")
  # TODO: See https://github.com/numenta/nupic/issues/2071
  def testInhibition(self):
    """
    Test if the firing number of coincidences after inhibition
    equals spatial pooler numActiveColumnsPerInhArea.
    """
    # Miscellaneous variables:
    # n, w:                 n, w of encoders
    # inputLen:             Length of binary input
    # synPermConnected:     Spatial pooler synPermConnected
    # synPermActiveInc:     Spatial pooler synPermActiveInc
    # connectPct:           Initial connect percentage of permanences
    # columnDimensions:     Number of spatial pooler coincidences
    # numActiveColumnsPerInhArea:  Spatial pooler numActiveColumnsPerInhArea
    # stimulusThreshold:    Spatial pooler stimulusThreshold
    # spSeed:               Spatial pooler for initial permanences
    # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
    # kDutyCycleFactor:     kDutyCycleFactor for dutyCycleTieBreaker in
    #                       Inhibition
    # spVerbosity:          Verbosity to print other sp initial parameters
    # testIter:             Testing iterations
    n = 100
    w = 15
    inputLen = 300
    columnDimensions = 2048
    numActiveColumnsPerInhArea = 40
    stimulusThreshold = 0
    spSeed = 1956
    stimulusThresholdInh = 0.00001
    kDutyCycleFactor = 0.01
    spVerbosity = 0
    testIter = 100

    spTest = SpatialPooler(
        columnDimensions=(columnDimensions, 1),
        inputDimensions=(1, inputLen),
        potentialRadius=inputLen / 2,
        numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
        spVerbosity=spVerbosity,
        stimulusThreshold=stimulusThreshold,
        seed=spSeed
        )
    # NOTE(review): the private methods/attributes used below
    # (_initialPermanence, _makeMasterCoincidences, _inhibitionObj, ...)
    # belonged to the old FDRCSpatial2 implementation — hence the skip above.
    initialPermanence = spTest._initialPermanence()
    spTest._masterPotentialM, spTest._masterPermanenceM = (
        spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                       spTest._coincRFShape,
                                       spTest.potentialPct,
                                       initialPermanence,
                                       spTest.random))

    spTest._updateInhibitionObj()
    boostFactors = numpy.ones(columnDimensions)

    for i in range(testIter):
      spTest._iterNum = i
      # random binary input with ~w of n bits on
      input_ = numpy.zeros((1, inputLen))
      nonzero = numpy.random.random(inputLen)
      input_[0][numpy.where (nonzero < float(w)/float(n))] = 1

      # overlap step
      spTest._computeOverlapsFP(input_,
                                stimulusThreshold=spTest.stimulusThreshold)
      spTest._overlaps *= boostFactors
      onCellIndices = numpy.where(spTest._overlaps > 0)
      spTest._onCells.fill(0)
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells

      # update _dutyCycleBeforeInh (moving average over dutyCyclePeriod)
      spTest.dutyCyclePeriod = min(i + 1, 1000)
      spTest._dutyCycleBeforeInh = (
          (spTest.dutyCyclePeriod - 1) *
          spTest._dutyCycleBeforeInh +denseOn) / spTest.dutyCyclePeriod
      dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
      dutyCycleTieBreaker *= kDutyCycleFactor

      # inhibition step
      numOn = spTest._inhibitionObj.compute(
          spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices,
          stimulusThresholdInh,  # stimulusThresholdInh
          max(spTest._overlaps)/1000,  # addToWinners
          )
      # update _dutyCycleAfterInh
      spTest._onCells.fill(0)
      onCellIndices = spTest._onCellIndices[0:numOn]
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells
      spTest._dutyCycleAfterInh = (((spTest.dutyCyclePeriod-1) *
                                    spTest._dutyCycleAfterInh + denseOn) /
                                   spTest.dutyCyclePeriod)

      # learning step
      spTest._adaptSynapses(onCellIndices, [], input_)

      # update boostFactor
      spTest._updateBoostFactors()
      boostFactors = spTest._firingBoostFactors

      # update dutyCycle and boost every 50 iterations
      if ((spTest._iterNum+1) % 50) == 0:
        spTest._updateInhibitionObj()
        spTest._updateMinDutyCycles(
            spTest._dutyCycleBeforeInh,
            spTest.minPctDutyCycleBeforeInh,
            spTest._minDutyCycleBeforeInh)
        spTest._updateMinDutyCycles(
            spTest._dutyCycleAfterInh,
            spTest.minPctDutyCycleAfterInh,
            spTest._minDutyCycleAfterInh)

      # test numOn and spTest.numActiveColumnsPerInhArea
      self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea,
                       "Error at input %s, actual numOn are: %i, "
                       "numActivePerInhAre is: %s" % (
                           i, numOn, numActiveColumnsPerInhArea))
# Allow running this test module directly (python <file>.py).
if __name__=="__main__":
  unittest.main()
| 6,368 | Python | .py | 140 | 36.607143 | 84 | 0.64486 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,098 | spatial_pooler_compatability_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/spatial_pooler_compatability_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import cPickle as pickle
import numpy
import time
import traceback
import unittest2 as unittest
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
from nupic.bindings.math import GetNTAReal, Random as NupicRandom
from nupic.algorithms.spatial_pooler import SpatialPooler as PySpatialPooler
from nupic.support.unittesthelpers.algorithm_test_helpers \
import getNumpyRandomGenerator, CreateSP, convertPermanences
# numpy dtype string for NuPIC real-valued arrays (boost factors, duty cycles).
realType = GetNTAReal()
# numpy dtype string for binary input/output and potential/connected arrays.
uintType = "uint32"
# Number of random input records used per side-by-side / serialization run.
numRecords = 100
class SpatialPoolerCompatibilityTest(unittest.TestCase):
  """
  Tests to ensure that the PY and CPP implementations of the spatial pooler
  are functionally identical.
  """

  def setUp(self):
    # Set to 1 for more verbose debugging output
    self.verbosity = 1

  def assertListAlmostEqual(self, alist, blist):
    """Assert two sequences have equal length and element-wise difference
    below 1e-4 (tolerance is fixed; see callers for why it is loose)."""
    self.assertEqual(len(alist), len(blist))
    for a, b in zip(alist, blist):
      diff = abs(a - b)
      self.assertLess(diff, 1e-4)

  def compare(self, pySp, cppSp):
    """Assert that two SP instances agree on every queryable parameter and
    on their internal state: boost factors, duty cycles, and per-column
    potential pools, permanences and connected synapses."""
    self.assertAlmostEqual(pySp.getNumColumns(),
                           cppSp.getNumColumns())
    self.assertAlmostEqual(pySp.getNumInputs(),
                           cppSp.getNumInputs())
    self.assertAlmostEqual(pySp.getPotentialRadius(),
                           cppSp.getPotentialRadius())
    self.assertAlmostEqual(pySp.getPotentialPct(),
                           cppSp.getPotentialPct())
    self.assertAlmostEqual(pySp.getGlobalInhibition(),
                           cppSp.getGlobalInhibition())
    self.assertAlmostEqual(pySp.getNumActiveColumnsPerInhArea(),
                           cppSp.getNumActiveColumnsPerInhArea())
    self.assertAlmostEqual(pySp.getLocalAreaDensity(),
                           cppSp.getLocalAreaDensity())
    self.assertAlmostEqual(pySp.getStimulusThreshold(),
                           cppSp.getStimulusThreshold())
    self.assertAlmostEqual(pySp.getInhibitionRadius(),
                           cppSp.getInhibitionRadius())
    self.assertAlmostEqual(pySp.getDutyCyclePeriod(),
                           cppSp.getDutyCyclePeriod())
    self.assertAlmostEqual(pySp.getBoostStrength(),
                           cppSp.getBoostStrength())
    self.assertAlmostEqual(pySp.getIterationNum(),
                           cppSp.getIterationNum())
    self.assertAlmostEqual(pySp.getIterationLearnNum(),
                           cppSp.getIterationLearnNum())
    self.assertAlmostEqual(pySp.getSpVerbosity(),
                           cppSp.getSpVerbosity())
    self.assertAlmostEqual(pySp.getUpdatePeriod(),
                           cppSp.getUpdatePeriod())
    self.assertAlmostEqual(pySp.getSynPermTrimThreshold(),
                           cppSp.getSynPermTrimThreshold())
    self.assertAlmostEqual(pySp.getSynPermActiveInc(),
                           cppSp.getSynPermActiveInc())
    self.assertAlmostEqual(pySp.getSynPermInactiveDec(),
                           cppSp.getSynPermInactiveDec())
    self.assertAlmostEqual(pySp.getSynPermBelowStimulusInc(),
                           cppSp.getSynPermBelowStimulusInc())
    self.assertAlmostEqual(pySp.getSynPermConnected(),
                           cppSp.getSynPermConnected())
    self.assertAlmostEqual(pySp.getMinPctOverlapDutyCycles(),
                           cppSp.getMinPctOverlapDutyCycles())

    numColumns = pySp.getNumColumns()
    numInputs = pySp.getNumInputs()

    # Whole-array state: boost factors and the three duty cycle arrays.
    pyBoost = numpy.zeros(numColumns).astype(realType)
    cppBoost = numpy.zeros(numColumns).astype(realType)
    pySp.getBoostFactors(pyBoost)
    cppSp.getBoostFactors(cppBoost)
    self.assertListAlmostEqual(list(pyBoost), list(cppBoost))

    pyOverlap = numpy.zeros(numColumns).astype(realType)
    cppOverlap = numpy.zeros(numColumns).astype(realType)
    pySp.getOverlapDutyCycles(pyOverlap)
    cppSp.getOverlapDutyCycles(cppOverlap)
    self.assertListAlmostEqual(list(pyOverlap), list(cppOverlap))

    pyActive = numpy.zeros(numColumns).astype(realType)
    cppActive = numpy.zeros(numColumns).astype(realType)
    pySp.getActiveDutyCycles(pyActive)
    cppSp.getActiveDutyCycles(cppActive)
    self.assertListAlmostEqual(list(pyActive), list(cppActive))

    pyMinOverlap = numpy.zeros(numColumns).astype(realType)
    cppMinOverlap = numpy.zeros(numColumns).astype(realType)
    pySp.getMinOverlapDutyCycles(pyMinOverlap)
    cppSp.getMinOverlapDutyCycles(cppMinOverlap)
    self.assertListAlmostEqual(list(pyMinOverlap), list(cppMinOverlap))

    # Per-column state: potential pool, permanences, connected synapses.
    for i in xrange(pySp.getNumColumns()):
      if self.verbosity > 2: print "Column:",i
      pyPot = numpy.zeros(numInputs).astype(uintType)
      cppPot = numpy.zeros(numInputs).astype(uintType)
      pySp.getPotential(i, pyPot)
      cppSp.getPotential(i, cppPot)
      self.assertListEqual(list(pyPot),list(cppPot))

      pyPerm = numpy.zeros(numInputs).astype(realType)
      cppPerm = numpy.zeros(numInputs).astype(realType)
      pySp.getPermanence(i, pyPerm)
      cppSp.getPermanence(i, cppPerm)
      self.assertListAlmostEqual(list(pyPerm),list(cppPerm))

      pyCon = numpy.zeros(numInputs).astype(uintType)
      cppCon = numpy.zeros(numInputs).astype(uintType)
      pySp.getConnectedSynapses(i, pyCon)
      cppSp.getConnectedSynapses(i, cppCon)
      self.assertListEqual(list(pyCon), list(cppCon))

    pyConCounts = numpy.zeros(numColumns).astype(uintType)
    cppConCounts = numpy.zeros(numColumns).astype(uintType)
    pySp.getConnectedCounts(pyConCounts)
    cppSp.getConnectedCounts(cppConCounts)
    self.assertListEqual(list(pyConCounts), list(cppConCounts))

  def runSideBySide(self, params, seed = None,
                    learnMode = None,
                    convertEveryIteration = False):
    """
    Run the PY and CPP implementations side by side on random inputs.
    If seed is None a random seed will be chosen based on time, otherwise
    the fixed seed will be used.
    If learnMode is None learning will be randomly turned on and off.
    If it is False or True then set it accordingly.
    If convertEveryIteration is True, the CPP will be copied from the PY
    instance on every iteration just before each compute.
    """
    randomState = getNumpyRandomGenerator(seed)
    cppSp = CreateSP("cpp", params)
    pySp = CreateSP("py", params)
    self.compare(pySp, cppSp)
    numColumns = pySp.getNumColumns()
    numInputs = pySp.getNumInputs()
    threshold = 0.8
    # Random sparse binary input: ~20% of bits on per record.
    inputMatrix = (
        randomState.rand(numRecords,numInputs) > threshold).astype(uintType)

    # Run side by side for numRecords iterations
    for i in xrange(numRecords):
      if learnMode is None:
        learn = (randomState.rand() > 0.5)
      else:
        learn = learnMode
      if self.verbosity > 1:
        print "Iteration:",i,"learn=",learn
      PyActiveArray = numpy.zeros(numColumns).astype(uintType)
      CppActiveArray = numpy.zeros(numColumns).astype(uintType)
      inputVector = inputMatrix[i,:]

      pySp.compute(inputVector, learn, PyActiveArray)
      cppSp.compute(inputVector, learn, CppActiveArray)
      self.assertListEqual(list(PyActiveArray), list(CppActiveArray))
      self.compare(pySp,cppSp)

      # The boost factors were similar enough to get this far.
      # Now make them completely equal so that small variations don't cause
      # columns to have slightly higher boosted overlaps.
      cppBoostFactors = numpy.zeros(numColumns, dtype=realType)
      cppSp.getBoostFactors(cppBoostFactors)
      pySp.setBoostFactors(cppBoostFactors)

      # The permanence values for the two implementations drift ever so slowly
      # over time due to numerical precision issues. This occasionally causes
      # different permanences to be connected. By transferring the permanence
      # values every so often, we can avoid this drift but still check that
      # the logic is applied equally for both implementations.
      if convertEveryIteration or ((i+1)%10 == 0):
        convertPermanences(pySp, cppSp)

  def runSerialize(self, imp, params, seed = None):
    """Train an SP of the given implementation on half of numRecords random
    records, pickle/unpickle it, then run original and copy side by side and
    assert identical outputs."""
    randomState = getNumpyRandomGenerator(seed)
    sp1 = CreateSP(imp, params)
    numColumns = sp1.getNumColumns()
    numInputs = sp1.getNumInputs()
    threshold = 0.8
    inputMatrix = (
        randomState.rand(numRecords,numInputs) > threshold).astype(uintType)

    for i in xrange(numRecords/2):
      activeArray = numpy.zeros(numColumns).astype(uintType)
      inputVector = inputMatrix[i,:]
      learn = (randomState.rand() > 0.5)
      sp1.compute(inputVector, learn, activeArray)

    sp2 = pickle.loads(pickle.dumps(sp1))
    # NOTE(review): record index numRecords/2 is skipped here — looks like an
    # off-by-one; confirm intent before changing.
    for i in xrange(numRecords/2+1,numRecords):
      activeArray1 = numpy.zeros(numColumns).astype(uintType)
      activeArray2 = numpy.zeros(numColumns).astype(uintType)
      inputVector = inputMatrix[i,:]
      learn = (randomState.rand() > 0.5)
      sp1.compute(inputVector, learn, activeArray1)
      sp2.compute(inputVector, learn, activeArray2)
      self.assertListEqual(list(activeArray1), list(activeArray2))

  def testCompatibility1(self):
    # Global inhibition, small 2-D input/column topology.
    params = {
      "inputDimensions": [4,4],
      "columnDimensions": [5,3],
      "potentialRadius": 20,
      "potentialPct": 0.5,
      "globalInhibition": True,
      "localAreaDensity": 0,
      "numActiveColumnsPerInhArea": 5,
      "stimulusThreshold": 0,
      "synPermInactiveDec": 0.01,
      "synPermActiveInc": 0.1,
      "synPermConnected": 0.10,
      "minPctOverlapDutyCycle": 0.001,
      "dutyCyclePeriod": 30,
      "boostStrength": 10.0,
      "seed": 4,
      "spVerbosity": 0
    }
    # This seed used to cause problems if learnMode is set to None
    self.runSideBySide(params, seed = 63862)

    # These seeds used to fail
    self.runSideBySide(params, seed = 62605)
    self.runSideBySide(params, seed = 30440)
    self.runSideBySide(params, seed = 49457)

    self.runSideBySide(params)

  def testCompatibilityNoLearn(self):
    # Same configuration as testCompatibility1 but with learning forced off.
    params = {
      "inputDimensions": [4,4],
      "columnDimensions": [5,3],
      "potentialRadius": 20,
      "potentialPct": 0.5,
      "globalInhibition": True,
      "localAreaDensity": 0,
      "numActiveColumnsPerInhArea": 5,
      "stimulusThreshold": 0,
      "synPermInactiveDec": 0.01,
      "synPermActiveInc": 0.1,
      "synPermConnected": 0.10,
      "minPctOverlapDutyCycle": 0.001,
      "dutyCyclePeriod": 30,
      "boostStrength": 10.0,
      "seed": 4,
      "spVerbosity": 0
    }
    self.runSideBySide(params, seed = None, learnMode = False)

  def testCompatibility2(self):
    # Local inhibition; permanences synchronized on every iteration.
    params = {
      "inputDimensions": [12,7],
      "columnDimensions": [4,15],
      "potentialRadius": 22,
      "potentialPct": 0.3,
      "globalInhibition": False,
      "localAreaDensity": 0,
      "numActiveColumnsPerInhArea": 5,
      "stimulusThreshold": 2,
      "synPermInactiveDec": 0.04,
      "synPermActiveInc": 0.14,
      "synPermConnected": 0.178,
      "minPctOverlapDutyCycle": 0.021,
      "dutyCyclePeriod": 20,
      "boostStrength": 11.0,
      "seed": 6,
      "spVerbosity": 0
    }
    self.runSideBySide(params, convertEveryIteration=True, seed=63862)

  def testCompatibility3(self):
    # 3-D topology with local inhibition driven by localAreaDensity.
    params = {
      "inputDimensions": [2,4,5],
      "columnDimensions": [4,3,3],
      "potentialRadius": 30,
      "potentialPct": 0.7,
      "globalInhibition": False,
      "localAreaDensity": 0.23,
      "numActiveColumnsPerInhArea": 0,
      "stimulusThreshold": 2,
      "synPermInactiveDec": 0.02,
      "synPermActiveInc": 0.1,
      "synPermConnected": 0.12,
      "minPctOverlapDutyCycle": 0.011,
      "dutyCyclePeriod": 25,
      "boostStrength": 11.0,
      "seed": 19,
      "spVerbosity": 0
    }
    self.runSideBySide(params, convertEveryIteration=True, seed=63862)

  def testSerialization(self):
    # Pickling either implementation must preserve all parameters and state.
    params = {
      'inputDimensions' : [2,4,5],
      'columnDimensions' : [4,3,3],
      'potentialRadius' : 30,
      'potentialPct' : 0.7,
      'globalInhibition' : False,
      'localAreaDensity' : 0.23,
      'numActiveColumnsPerInhArea' : 0,
      'stimulusThreshold' : 2,
      'synPermInactiveDec' : 0.02,
      'synPermActiveInc' : 0.1,
      'synPermConnected' : 0.12,
      'minPctOverlapDutyCycle' : 0.011,
      'dutyCyclePeriod' : 25,
      'boostStrength' : 11.0,
      'seed' : 19,
      'spVerbosity' : 0
    }
    sp1 = CreateSP("py", params)
    sp2 = pickle.loads(pickle.dumps(sp1))
    self.compare(sp1, sp2)

    sp1 = CreateSP("cpp", params)
    sp2 = pickle.loads(pickle.dumps(sp1))
    self.compare(sp1, sp2)

  def testSerializationRun(self):
    # A deserialized SP must keep producing identical outputs when run.
    params = {
      'inputDimensions' : [2,4,5],
      'columnDimensions' : [4,3,3],
      'potentialRadius' : 30,
      'potentialPct' : 0.7,
      'globalInhibition' : False,
      'localAreaDensity' : 0.23,
      'numActiveColumnsPerInhArea' : 0,
      'stimulusThreshold' : 2,
      'synPermInactiveDec' : 0.02,
      'synPermActiveInc' : 0.1,
      'synPermConnected' : 0.12,
      'minPctOverlapDutyCycle' : 0.011,
      'dutyCyclePeriod' : 25,
      'boostStrength' : 11.0,
      'seed' : 19,
      'spVerbosity' : 0
    }
    self.runSerialize("py", params)
    self.runSerialize("cpp", params)

  def testInhibitColumnsGlobal(self):
    # Both implementations must pick the same winning column set under
    # global inhibition for identical overlap vectors.
    params = {
      "inputDimensions": [512],
      "columnDimensions": [512],
      "globalInhibition": True,
      "numActiveColumnsPerInhArea": 40,
      "seed": 19
    }
    sp1 = CreateSP("py", params)
    sp2 = CreateSP("cpp", params)

    for _ in range(100):
      overlaps = numpy.random.randint(10, size=512).astype(realType)
      columns1 = sp1._inhibitColumns(overlaps)
      columns2 = sp2._inhibitColumns(overlaps)
      self.assertEqual(set(columns1), set(columns2))

  @unittest.skip("Currently fails due to non-fixed randomness in C++ SP.")
  def testCompatibilityCppPyDirectCall1D(self):
    """Check SP implementations have same behavior with 1D input."""
    pySp = PySpatialPooler(
        inputDimensions=[121], columnDimensions=[300])
    cppSp = CPPSpatialPooler(
        inputDimensions=[121], columnDimensions=[300])

    data = numpy.zeros([121], dtype=uintType)
    for i in xrange(21):
      data[i] = 1

    nCols = 300
    d1 = numpy.zeros(nCols, dtype=uintType)
    d2 = numpy.zeros(nCols, dtype=uintType)

    pySp.compute(data, True, d1) # learn
    cppSp.compute(data, True, d2)

    d1 = d1.nonzero()[0].tolist()
    d2 = d2.nonzero()[0].tolist()
    self.assertListEqual(
        d1, d2, "SP outputs are not equal: \n%s \n%s" % (str(d1), str(d2)))

  @unittest.skip("Currently fails due to non-fixed randomness in C++ SP.")
  def testCompatibilityCppPyDirectCall2D(self):
    """Check SP implementations have same behavior with 2D input."""
    pySp = PySpatialPooler(
        inputDimensions=[121, 1], columnDimensions=[30, 30])
    cppSp = CPPSpatialPooler(
        inputDimensions=[121, 1], columnDimensions=[30, 30])

    data = numpy.zeros([121, 1], dtype=uintType)
    for i in xrange(21):
      data[i][0] = 1

    nCols = 900
    d1 = numpy.zeros(nCols, dtype=uintType)
    d2 = numpy.zeros(nCols, dtype=uintType)

    pySp.compute(data, True, d1) # learn
    cppSp.compute(data, True, d2)

    d1 = d1.nonzero()[0].tolist()
    d2 = d2.nonzero()[0].tolist()
    self.assertListEqual(
        d1, d2, "SP outputs are not equal: \n%s \n%s" % (str(d1), str(d2)))
# Allow running this test module directly (python <file>.py).
if __name__ == "__main__":
  unittest.main()
| 16,220 | Python | .py | 391 | 34.519182 | 78 | 0.677645 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,099 | sp_learn_inference_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/sp_learn_inference_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This test intermixes learning and inference calls. It checks that inserting
random inference calls have no effect on learning.
TODO: implement an SP Diff routine. That should be fun!
"""
import cPickle as pickle
import numpy as np
import random
import time
import unittest2 as unittest
from nupic.bindings.math import GetNTAReal
from nupic.algorithms.fdrutilities import spDiff
from nupic.algorithms.spatial_pooler import SpatialPooler
realDType = GetNTAReal()
class SPLearnInferenceTest(unittest.TestCase):
  """Test to check that inference calls do not affect learning.

  Two spatial poolers are constructed with identical parameters and seeds and
  trained on the same pre-encoded input records; afterwards they must be
  equivalent (verified with spDiff and by comparing learning stats).

  NOTE(review): despite the comments below, the second pooler currently
  receives the exact same learn=True calls as the first — no inference-only
  calls are interleaved yet (see TODO referencing nupic issue #2072), and the
  test itself is skipped.
  """

  @staticmethod
  def _createPooler(n, columnDimensions, numActiveColumnsPerInhArea,
                    spVerbosity, spSeed):
    """Build a SpatialPooler with the fixed test parameters.

    Extracted into one place so that both poolers under comparison are
    guaranteed to be constructed identically.
    """
    return SpatialPooler(
        columnDimensions=(columnDimensions, 1),
        inputDimensions=(1, n),
        potentialRadius=n/2,
        numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
        spVerbosity=spVerbosity,
        seed=spSeed,
        synPermInactiveDec=0.01,
        synPermActiveInc=0.2,
        synPermConnected=0.11)

  def _runLearnInference(self,
                         n=30,
                         w=15,
                         columnDimensions=2048,
                         numActiveColumnsPerInhArea=40,
                         spSeed=1951,
                         spVerbosity=0,
                         numTrainingRecords=100,
                         seed=42):
    """Train two identically-seeded spatial poolers and verify equivalence.

    Args:
      n: input vector width (number of input bits).
      w: number of ON bits in each random input pattern.
      columnDimensions: number of SP columns.
      numActiveColumnsPerInhArea: SP sparsification parameter.
      spSeed: seed passed to both SpatialPooler instances.
      spVerbosity: SP verbosity level; also gates per-record prints here.
      numTrainingRecords: number of random input patterns to train on.
      seed: seed for the Python and numpy RNGs used to build/train.
    """
    # Instantiate two identical spatial poolers. One will be used only for
    # learning. The other was intended to also receive random inference calls
    # (not yet implemented -- see TODO below).
    spLearnOnly = self._createPooler(n, columnDimensions,
                                     numActiveColumnsPerInhArea, spVerbosity,
                                     spSeed)
    spLearnInfer = self._createPooler(n, columnDimensions,
                                      numActiveColumnsPerInhArea, spVerbosity,
                                      spSeed)

    random.seed(seed)
    np.random.seed(seed)

    # Build up training set with numTrainingRecords patterns.
    inputs = []  # holds post-encoded input patterns
    for i in xrange(numTrainingRecords):
      inputVector = np.zeros(n, dtype=realDType)
      inputVector[random.sample(xrange(n), w)] = 1
      inputs.append(inputVector)

    # Train each SP with identical inputs. The RNGs are re-seeded before each
    # pass so any randomness inside compute() is reproduced exactly.
    startTime = time.time()

    random.seed(seed)
    np.random.seed(seed)
    for i in xrange(numTrainingRecords):
      if spVerbosity > 0:
        print("Input #%d" % i)
      # TODO: See https://github.com/numenta/nupic/issues/2072
      encodedInput = inputs[i]
      decodedOutput = np.zeros(columnDimensions)
      spLearnOnly.compute(encodedInput, learn=True, activeArray=decodedOutput)

    random.seed(seed)
    np.random.seed(seed)
    for i in xrange(numTrainingRecords):
      if spVerbosity > 0:
        print("Input #%d" % i)
      # TODO: See https://github.com/numenta/nupic/issues/2072
      encodedInput = inputs[i]
      decodedOutput = np.zeros(columnDimensions)
      spLearnInfer.compute(encodedInput, learn=True, activeArray=decodedOutput)

    print("\nElapsed time: %.2f seconds\n" % (time.time() - startTime))

    # Test that both SPs are identical by checking learning stats.
    # A more in depth test would check all the coincidences, duty cycles, etc.
    # ala tpDiff.
    # Edit: spDiff has been written as an in depth tester of the spatial pooler.
    learnOnlyStats = spLearnOnly.getLearningStats()
    learnInferStats = spLearnInfer.getLearningStats()

    success = True
    # Check that the two spatial poolers are equivalent after the same
    # training.
    success = success and spDiff(spLearnInfer, spLearnOnly)
    self.assertTrue(success)

    # Make sure that the pickled and loaded SPs are equivalent.
    spPickle = pickle.dumps(spLearnOnly, protocol=0)
    spLearnOnlyLoaded = pickle.loads(spPickle)
    success = success and spDiff(spLearnOnly, spLearnOnlyLoaded)
    self.assertTrue(success)

    # Finally compare the learning-stats dictionaries key by key, reporting
    # every mismatch before failing.
    for k in learnOnlyStats.keys():
      if learnOnlyStats[k] != learnInferStats[k]:
        success = False
        print("Stat %s is different: %s %s"
              % (k, learnOnlyStats[k], learnInferStats[k]))

    self.assertTrue(success)
    if success:
      print("Test succeeded")

  @unittest.skip("Currently fails due to switch from FDRCSpatial2 to "
                 "SpatialPooler. The new SP doesn't have explicit methods "
                 "to get inference.")
  # TODO: See https://github.com/numenta/nupic/issues/2072
  def testLearnInference(self):
    self._runLearnInference(n=50, w=15)
# Allow running this test module directly as a script.
if __name__ == "__main__":
  unittest.main()
| 5,454 | Python | .py | 126 | 36.952381 | 84 | 0.688667 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |