id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,800 | description.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/scalar_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/scalar_0.csv'),
'errorMetric': 'aae',
'modelParams': {
'sensorParams': {
'verbosity': 0,
'encoders': {
'field1': {
'clipInput': True,
'fieldname': u'field1',
'maxval': 0.1,
'minval': 0.0,
'n': 211,
'name': u'field1',
'type': 'ScalarEncoder',
'w': 21
},
'classification': {
'classifierOnly': True,
'clipInput': True,
'fieldname': u'classification',
'maxval': 1.0,
'minval': 0.0,
'n': 211,
'name': u'classification',
'type': 'ScalarEncoder',
'w': 21
},
},
},
'clParams': {
'verbosity': 0,
},
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 2,135 | Python | .py | 63 | 28.190476 | 78 | 0.585389 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,801 | description.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/base/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalClassification',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'field1': {
'fieldname': u'field1',
'n': 121,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
u'classification': {
'classifierOnly': True,
'fieldname': u'classification',
'n': 121,
'name': u'classification',
'type': 'SDRCategoryEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': False,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : False,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.25,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '0',
},
'anomalyParams': {
u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None
},
'trainSPNetOnlyIfRequested': False,
},
'dataSource': 'fillInBySubExperiment',
'errorMetric': 'fillInBySubExperiment'
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : { u'info': u'testSpatialClassification',
u'streams': [ { u'columns': [u'*'],
u'info': u'spatialClassification',
u'source': config['dataSource']}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'classification', u'predictionSteps': [0]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field='classification', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': config['errorMetric'],
'window': 100,
'steps': 0}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 14,796 | Python | .py | 322 | 36.736025 | 108 | 0.629262 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,802 | description.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/category_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/category_0.csv'),
'errorMetric': 'avg_err',
'modelParams': {
'sensorParams': { 'verbosity': 0},
'clParams': {
'verbosity': 0,
},
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,551 | Python | .py | 38 | 37.894737 | 78 | 0.658925 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,803 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/2field_many_balanced/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 15,806 | Python | .py | 336 | 38.553571 | 110 | 0.647063 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,804 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/10field_few2_skewed/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21),
'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21),
'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21),
'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21),
'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21),
'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21),
'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21),
'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 16,550 | Python | .py | 344 | 39.47093 | 110 | 0.643327 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,805 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/2field_few_6040/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 15,806 | Python | .py | 336 | 38.553571 | 110 | 0.647063 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,806 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/2field_few_balanced/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
                                     ('numericFieldNameB', 'sum'),
                                     ('categoryFieldNameC', 'first')],
                         'hours': 0},
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'NontemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            # dsEncoderSchema = [
            # DeferredDictLookup('__field_name_encoder'),
            # ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
                'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
            },
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys are any desired combination of the following:
            # days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        # Controls whether the SP (spatial pooler) region is enabled.
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },
        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,
        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            # > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            # > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName' : 'SDRClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            # NOTE: deliberately held as a string, not an int (see the
            # str(predictionSteps) assignment below this dict).
            'steps': '1',
        },
        # NOTE(review): semantics not visible in this file -- presumably gates
        # SP-only network training; confirm against HTMPredictionModel.
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
    config['predictAheadTime'], config['aggregationInfo'])))
  # Raise instead of assert: asserts are stripped under `python -O`, and a
  # non-positive prediction horizon must never silently reach clParams.
  if predictionSteps < 1:
    raise ValueError(
      "predictAheadTime must span at least one aggregation period "
      "(computed predictionSteps=%r)" % (predictionSteps,))
  # clParams expects the step count serialized as a string.
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
  environment='opfExperiment',
  tasks = [
  {
    # Task label; this label string may be used for diagnostic logging and for
    # constructing filenames or directory pathnames for task-specific files, etc.
    'taskLabel' : "Anomaly",
    # Input stream specification per py/nupic/cluster/database/StreamDef.json.
    #
    'dataset' : {
      'info': 'test_NoProviders',
      'version': 1,
      'streams': [
        {
          'columns': ['*'],
          'info': 'my simple dataset',
          # NOTE(review): no explicit `import os` is visible at the top of
          # this file; `os` presumably arrives via the star import above --
          # confirm, or add an explicit `import os`.
          'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
        }
      ],
      # TODO: Aggregation is not supported yet by run_opf_experiment.py
      #'aggregation' : config['aggregationInfo']
    },
    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount' : -1,
    # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
    'taskControl' : {
      # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
      # instances.
      'iterationCycle' : [
        #IterationPhaseSpecLearnOnly(1000),
        IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
        #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
      ],
      'metrics' : [
      ],
      # Logged Metrics: A sequence of regular expressions that specify which of
      # the metrics from the Inference Specifications section MUST be logged for
      # every prediction. The regex's correspond to the automatically generated
      # metric labels. This is similar to the way the optimization metric is
      # specified in permutations.py.
      'loggedMetrics': ['.*nupicScore.*'],
      # Callbacks for experimentation/research (optional)
      'callbacks' : {
        # Callbacks to be called at the beginning of a task, before model iterations.
        # Signature: callback(<reference to OPFExperiment>); returns nothing
        # 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
        # 'setup' : [htmPredictionModelControlDisableTPLearningCb],
        'setup' : [],
        # Callbacks to be called after every learning/inference iteration
        # Signature: callback(<reference to OPFExperiment>); returns nothing
        'postIter' : [],
        # Callbacks to be called when the experiment task is finished
        # Signature: callback(<reference to OPFExperiment>); returns nothing
        'finish' : []
      }
    } # End of taskControl
  }, # End of task
  ]
)
# Entry point consumed by the OPF runner: bundle the model configuration and
# the task-control specification above into the standard description object.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,807 | Python | .py | 336 | 38.553571 | 110 | 0.647021 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,807 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/2fields_many_skewed/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
                                     ('numericFieldNameB', 'sum'),
                                     ('categoryFieldNameC', 'first')],
                         'hours': 0},
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'NontemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            # dsEncoderSchema = [
            # DeferredDictLookup('__field_name_encoder'),
            # ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
                'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
            },
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys are any desired combination of the following:
            # days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        # Controls whether the SP (spatial pooler) region is enabled.
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },
        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,
        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            # > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            # > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName' : 'SDRClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            # NOTE: deliberately held as a string, not an int (see the
            # str(predictionSteps) assignment below this dict).
            'steps': '1',
        },
        # NOTE(review): semantics not visible in this file -- presumably gates
        # SP-only network training; confirm against HTMPredictionModel.
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
    config['predictAheadTime'], config['aggregationInfo'])))
  # Raise instead of assert: asserts are stripped under `python -O`, and a
  # non-positive prediction horizon must never silently reach clParams.
  if predictionSteps < 1:
    raise ValueError(
      "predictAheadTime must span at least one aggregation period "
      "(computed predictionSteps=%r)" % (predictionSteps,))
  # clParams expects the step count serialized as a string.
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
  environment='opfExperiment',
  tasks = [
  {
    # Task label; this label string may be used for diagnostic logging and for
    # constructing filenames or directory pathnames for task-specific files, etc.
    'taskLabel' : "Anomaly",
    # Input stream specification per py/nupic/cluster/database/StreamDef.json.
    #
    'dataset' : {
      'info': 'test_NoProviders',
      'version': 1,
      'streams': [
        {
          'columns': ['*'],
          'info': 'my simple dataset',
          # NOTE(review): no explicit `import os` is visible at the top of
          # this file; `os` presumably arrives via the star import above --
          # confirm, or add an explicit `import os`.
          'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
        }
      ],
      # TODO: Aggregation is not supported yet by run_opf_experiment.py
      #'aggregation' : config['aggregationInfo']
    },
    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount' : -1,
    # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
    'taskControl' : {
      # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
      # instances.
      'iterationCycle' : [
        #IterationPhaseSpecLearnOnly(1000),
        IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
        #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
      ],
      'metrics' : [
      ],
      # Logged Metrics: A sequence of regular expressions that specify which of
      # the metrics from the Inference Specifications section MUST be logged for
      # every prediction. The regex's correspond to the automatically generated
      # metric labels. This is similar to the way the optimization metric is
      # specified in permutations.py.
      'loggedMetrics': ['.*nupicScore.*'],
      # Callbacks for experimentation/research (optional)
      'callbacks' : {
        # Callbacks to be called at the beginning of a task, before model iterations.
        # Signature: callback(<reference to OPFExperiment>); returns nothing
        # 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
        # 'setup' : [htmPredictionModelControlDisableTPLearningCb],
        'setup' : [],
        # Callbacks to be called after every learning/inference iteration
        # Signature: callback(<reference to OPFExperiment>); returns nothing
        'postIter' : [],
        # Callbacks to be called when the experiment task is finished
        # Signature: callback(<reference to OPFExperiment>); returns nothing
        'finish' : []
      }
    } # End of taskControl
  }, # End of task
  ]
)
# Entry point consumed by the OPF runner: bundle the model configuration and
# the task-control specification above into the standard description object.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,806 | Python | .py | 336 | 38.553571 | 110 | 0.647063 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,808 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/simple/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'field0': dict(fieldname='field0', n=100, name='field0', type='SDRCategoryEncoder', w=21),
'p': dict(fieldname='p', n=100, name='p', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust the base config dictionary for any modifications made by an importing
# sub-experiment.  Mutates `config` in place.
updateConfigFromSubConfig(config)
# Derive the classifier's prediction-step count from predictAheadTime and the
# aggregation period, either of which may be permuted over by a sub-experiment.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Resolve ValueGetterBase-derived futures (e.g. DeferredDictLookup).
# NOTE: this MUST be called after updateConfigFromSubConfig() so that
# value-getter-based substitutions from the sub-experiment (if any) are seen.
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
# Experiment control specification: a single "Anomaly" task that streams every
# column of the experiment's local data.csv through the model, learning and
# inferring on each record until the dataset is exhausted.
control = {
  'environment': 'opfExperiment',
  'tasks': [
    {
      # Label used for diagnostic logging and task-specific file names.
      'taskLabel': "Anomaly",

      # Input stream spec per py/nupic/cluster/database/StreamDef.json.
      'dataset': {
        'info': 'test_NoProviders',
        'version': 1,
        'streams': [
          {
            'columns': ['*'],
            'info': 'my simple dataset',
            'source': 'file://' + os.path.join(os.path.dirname(__file__),
                                               'data.csv'),
          }
        ],
        # NOTE: aggregation is not yet supported by run_opf_experiment.py.
      },

      # Maximum number of records to process; -1 = the entire dataset.
      'iterationCount': -1,

      # OPFTaskDriver parameters (per opfTaskControlSchema.json).
      'taskControl': {
        # Learn and infer simultaneously, in phases of 1000 iterations.
        'iterationCycle': [
          IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
        ],
        'metrics': [],
        # Regexes selecting which auto-generated metric labels MUST be
        # logged with every prediction.
        'loggedMetrics': ['.*nupicScore.*'],
        # Optional research hooks; each is callback(<OPFExperiment>) -> None.
        'callbacks': {
          'setup': [],      # before model iterations begin
          'postIter': [],   # after each learning/inference iteration
          'finish': [],     # once the task completes
        },
      },
    },
  ],
}

# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,815 | Python | .py | 336 | 38.580357 | 110 | 0.647269 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,809 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/10field_few_skewed/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model configuration for this spatial-anomaly experiment.  Any field
# here may be overridden by a sub-experiment via updateConfigFromSubConfig(),
# and values may be deferred value-getters resolved later by
# applyValueGettersToContainer().
config = {
  # Model family and config-format version.
  'model': "HTMPrediction",
  'version': 1,

  # Intermediate variables; also referenced from the control section.
  'aggregationInfo': {'fields': [('numericFieldNameA', 'mean'),
                                 ('numericFieldNameB', 'sum'),
                                 ('categoryFieldNameC', 'first')],
                      'hours': 0},
  'predictAheadTime': None,

  'modelParams': {
    # Score spatial novelty only; no temporal prediction is required for
    # the anomaly computation.
    'inferenceType': 'NontemporalAnomaly',

    'sensorParams': {
      # Sensor diagnostic verbosity: 0 = silent; >=1 prints what is sensed
      # at each step (see compute() in py/regions/RecordSensor.py).
      'verbosity': 0,
      # One identical SDR category encoder per input field f0..f9.
      'encoders': {
        'f%d' % i: dict(fieldname='f%d' % i, n=100, name='f%d' % i,
                        type='SDRCategoryEncoder', w=21)
        for i in range(10)
      },
      # None disables automatically generated sensor resets.
      'sensorAutoReset': None,
    },

    'spEnable': True,
    'spParams': {
      # 0 = silent; larger values print more SP diagnostics.
      'spVerbosity': 0,
      'globalInhibition': 1,
      # Column count is shared between the SP and TM regions.
      'columnCount': 2048,
      'inputWidth': 0,
      # Cap on simultaneously active columns; weaker ones are suppressed.
      'numActiveColumnsPerInhArea': 40,
      'seed': 1956,
      # Fraction of a column's receptive field available as potential
      # synapses at initialization time.
      'potentialPct': 0.5,
      # Synapses whose permanence exceeds this threshold are "connected"
      # and can contribute to a cell's firing.
      'synPermConnected': 0.1,
      'synPermActiveInc': 0.1,
      'synPermInactiveDec': 0.01,
    },

    # TM is required for temporal predictions; without it the model can only
    # reconstruct missing sensor inputs via the SP.
    'tmEnable': True,
    'tmParams': {
      # 0 = silent; 1..6 = increasing verbosity.
      'verbosity': 0,
      # Same column count as the SP region.
      'columnCount': 2048,
      'cellsPerColumn': 32,
      'inputWidth': 2048,
      'seed': 1960,
      # Temporal-pooler implementation selector (see _getTPClass).
      'temporalImp': 'cpp',
      # Synapses grown per learning step (None = use spNumActivePerInhArea).
      'newSynapseCount': 20,
      # Fixed-size CLA limits (> 0); -1 would mean non-fixed-size.
      'maxSynapsesPerSegment': 32,
      'maxSegmentsPerCell': 128,
      'initialPerm': 0.21,
      'permanenceInc': 0.1,
      # None would default to the increment value.
      'permanenceDec': 0.1,
      'globalDecay': 0.0,
      'maxAge': 0,
      # Minimum active synapses for a segment to be considered during the
      # best-matching-segment search.
      'minThreshold': 12,
      # Connected active synapses needed for a segment to become active.
      'activationThreshold': 16,
      'outputType': 'normal',
      # "Pay attention mode" length: how many new elements are appended to a
      # learned sequence at a time.
      'pamLength': 1,
    },

    'clParams': {
      'implementation': 'py',
      'regionName': 'SDRClassifierRegion',
      # 0 = silent; 1..6 = increasing verbosity.
      'verbosity': 0,
      # Learning/forgetting rate: higher adapts (and forgets) faster.
      'alpha': 0.001,
      # Overwritten after updateConfigFromSubConfig, computed from
      # aggregationInfo and predictAheadTime.
      'steps': '1',
    },

    'trainSPNetOnlyIfRequested': False,
  },
}
# end of config dictionary
# Adjust the base config dictionary for any modifications made by an importing
# sub-experiment.  Mutates `config` in place.
updateConfigFromSubConfig(config)
# Derive the classifier's prediction-step count from predictAheadTime and the
# aggregation period, either of which may be permuted over by a sub-experiment.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Resolve ValueGetterBase-derived futures (e.g. DeferredDictLookup).
# NOTE: this MUST be called after updateConfigFromSubConfig() so that
# value-getter-based substitutions from the sub-experiment (if any) are seen.
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
# Experiment control specification: a single "Anomaly" task that streams every
# column of the experiment's local data.csv through the model, learning and
# inferring on each record until the dataset is exhausted.
control = {
  'environment': 'opfExperiment',
  'tasks': [
    {
      # Label used for diagnostic logging and task-specific file names.
      'taskLabel': "Anomaly",

      # Input stream spec per py/nupic/cluster/database/StreamDef.json.
      'dataset': {
        'info': 'test_NoProviders',
        'version': 1,
        'streams': [
          {
            'columns': ['*'],
            'info': 'my simple dataset',
            'source': 'file://' + os.path.join(os.path.dirname(__file__),
                                               'data.csv'),
          }
        ],
        # NOTE: aggregation is not yet supported by run_opf_experiment.py.
      },

      # Maximum number of records to process; -1 = the entire dataset.
      'iterationCount': -1,

      # OPFTaskDriver parameters (per opfTaskControlSchema.json).
      'taskControl': {
        # Learn and infer simultaneously, in phases of 1000 iterations.
        'iterationCycle': [
          IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
        ],
        'metrics': [],
        # Regexes selecting which auto-generated metric labels MUST be
        # logged with every prediction.
        'loggedMetrics': ['.*nupicScore.*'],
        # Optional research hooks; each is callback(<OPFExperiment>) -> None.
        'callbacks': {
          'setup': [],      # before model iterations begin
          'postIter': [],   # after each learning/inference iteration
          'finish': [],     # once the task completes
        },
      },
    },
  ],
}

# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 16,550 | Python | .py | 344 | 39.47093 | 110 | 0.643327 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,810 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/2field_few_skewed/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model configuration for this spatial-anomaly experiment.  Any field
# here may be overridden by a sub-experiment via updateConfigFromSubConfig(),
# and values may be deferred value-getters resolved later by
# applyValueGettersToContainer().
config = {
  # Model family and config-format version.
  'model': "HTMPrediction",
  'version': 1,

  # Intermediate variables; also referenced from the control section.
  'aggregationInfo': {'fields': [('numericFieldNameA', 'mean'),
                                 ('numericFieldNameB', 'sum'),
                                 ('categoryFieldNameC', 'first')],
                      'hours': 0},
  'predictAheadTime': None,

  'modelParams': {
    # Score spatial novelty only; no temporal prediction is required for
    # the anomaly computation.
    'inferenceType': 'NontemporalAnomaly',

    'sensorParams': {
      # Sensor diagnostic verbosity: 0 = silent; >=1 prints what is sensed
      # at each step (see compute() in py/regions/RecordSensor.py).
      'verbosity': 0,
      # One identical SDR category encoder per input field (f0 and f1).
      'encoders': {
        'f%d' % i: dict(fieldname='f%d' % i, n=100, name='f%d' % i,
                        type='SDRCategoryEncoder', w=21)
        for i in range(2)
      },
      # None disables automatically generated sensor resets.
      'sensorAutoReset': None,
    },

    'spEnable': True,
    'spParams': {
      # 0 = silent; larger values print more SP diagnostics.
      'spVerbosity': 0,
      'globalInhibition': 1,
      # Column count is shared between the SP and TM regions.
      'columnCount': 2048,
      'inputWidth': 0,
      # Cap on simultaneously active columns; weaker ones are suppressed.
      'numActiveColumnsPerInhArea': 40,
      'seed': 1956,
      # Fraction of a column's receptive field available as potential
      # synapses at initialization time.
      'potentialPct': 0.5,
      # Synapses whose permanence exceeds this threshold are "connected"
      # and can contribute to a cell's firing.
      'synPermConnected': 0.1,
      'synPermActiveInc': 0.1,
      'synPermInactiveDec': 0.01,
    },

    # TM is required for temporal predictions; without it the model can only
    # reconstruct missing sensor inputs via the SP.
    'tmEnable': True,
    'tmParams': {
      # 0 = silent; 1..6 = increasing verbosity.
      'verbosity': 0,
      # Same column count as the SP region.
      'columnCount': 2048,
      'cellsPerColumn': 32,
      'inputWidth': 2048,
      'seed': 1960,
      # Temporal-pooler implementation selector (see _getTPClass).
      'temporalImp': 'cpp',
      # Synapses grown per learning step (None = use spNumActivePerInhArea).
      'newSynapseCount': 20,
      # Fixed-size CLA limits (> 0); -1 would mean non-fixed-size.
      'maxSynapsesPerSegment': 32,
      'maxSegmentsPerCell': 128,
      'initialPerm': 0.21,
      'permanenceInc': 0.1,
      # None would default to the increment value.
      'permanenceDec': 0.1,
      'globalDecay': 0.0,
      'maxAge': 0,
      # Minimum active synapses for a segment to be considered during the
      # best-matching-segment search.
      'minThreshold': 12,
      # Connected active synapses needed for a segment to become active.
      'activationThreshold': 16,
      'outputType': 'normal',
      # "Pay attention mode" length: how many new elements are appended to a
      # learned sequence at a time.
      'pamLength': 1,
    },

    'clParams': {
      'implementation': 'py',
      'regionName': 'SDRClassifierRegion',
      # 0 = silent; 1..6 = increasing verbosity.
      'verbosity': 0,
      # Learning/forgetting rate: higher adapts (and forgets) faster.
      'alpha': 0.001,
      # Overwritten after updateConfigFromSubConfig, computed from
      # aggregationInfo and predictAheadTime.
      'steps': '1',
    },

    'trainSPNetOnlyIfRequested': False,
  },
}
# end of config dictionary
# Adjust the base config dictionary for any modifications made by an importing
# sub-experiment.  Mutates `config` in place.
updateConfigFromSubConfig(config)
# Derive the classifier's prediction-step count from predictAheadTime and the
# aggregation period, either of which may be permuted over by a sub-experiment.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Resolve ValueGetterBase-derived futures (e.g. DeferredDictLookup).
# NOTE: this MUST be called after updateConfigFromSubConfig() so that
# value-getter-based substitutions from the sub-experiment (if any) are seen.
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
# Experiment control specification: a single "Anomaly" task that streams every
# column of the experiment's local data.csv through the model, learning and
# inferring on each record until the dataset is exhausted.
control = {
  'environment': 'opfExperiment',
  'tasks': [
    {
      # Label used for diagnostic logging and task-specific file names.
      'taskLabel': "Anomaly",

      # Input stream spec per py/nupic/cluster/database/StreamDef.json.
      'dataset': {
        'info': 'test_NoProviders',
        'version': 1,
        'streams': [
          {
            'columns': ['*'],
            'info': 'my simple dataset',
            'source': 'file://' + os.path.join(os.path.dirname(__file__),
                                               'data.csv'),
          }
        ],
        # NOTE: aggregation is not yet supported by run_opf_experiment.py.
      },

      # Maximum number of records to process; -1 = the entire dataset.
      'iterationCount': -1,

      # OPFTaskDriver parameters (per opfTaskControlSchema.json).
      'taskControl': {
        # Learn and infer simultaneously, in phases of 1000 iterations.
        'iterationCycle': [
          IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
        ],
        'metrics': [],
        # Regexes selecting which auto-generated metric labels MUST be
        # logged with every prediction.
        'loggedMetrics': ['.*nupicScore.*'],
        # Optional research hooks; each is callback(<OPFExperiment>) -> None.
        'callbacks': {
          'setup': [],      # before model iterations begin
          'postIter': [],   # after each learning/inference iteration
          'finish': [],     # once the task completes
        },
      },
    },
  ],
}

# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,806 | Python | .py | 336 | 38.553571 | 110 | 0.647063 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,811 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/novel_combination/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
  updateConfigFromSubConfig,
  applyValueGettersToContainer,
  DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
  IterationPhaseSpecLearnOnly,
  IterationPhaseSpecInferOnly,
  IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model these parameters apply to.
    'model': "HTMPrediction",

    # Format version of this config.
    'version': 1,

    # Intermediate values used to compute modelParams fields; also referenced
    # from the control section.
    'aggregationInfo': {
        'fields': [
            ('numericFieldNameA', 'mean'),
            ('numericFieldNameB', 'sum'),
            ('categoryFieldNameC', 'first'),
        ],
        'hours': 0,
    },
    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference this model performs.
        'inferenceType': 'NontemporalAnomaly',

        'sensorParams': {
            # Sensor region verbosity: 0 silent; >=1 some info; >=2 more;
            # >=3 even more (see compute() in py/regions/RecordSensor.py).
            'verbosity': 0,

            # Field encoders (generated from DS_ENCODER_SCHEMA).
            'encoders': {
                'f0': {'fieldname': 'f0', 'n': 100, 'name': 'f0',
                       'type': 'SDRCategoryEncoder', 'w': 21},
                'f1': {'fieldname': 'f1', 'n': 100, 'name': 'f1',
                       'type': 'SDRCategoryEncoder', 'w': 21},
            },

            # Period for automatically generated resets from a RecordSensor;
            # None (or all-zero values) disables them. Valid keys: days,
            # hours, minutes, seconds, milliseconds, microseconds, weeks;
            # e.g. dict(days=1, hours=12) for 1.5 days.
            'sensorAutoReset': None,
        },

        'spEnable': True,
        'spParams': {
            # SP verbosity: 0 silent; >=1 some info; >=2 more info.
            'spVerbosity': 0,
            'globalInhibition': 1,
            # Number of cortical columns (same for SP and TM).
            'columnCount': 2048,
            'inputWidth': 0,
            # Max active columns in the SP output; weaker ones beyond this
            # are suppressed by inhibition.
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # Fraction of a column's receptive field available for potential
            # synapses: potentialPct * (2*potentialRadius+1)^2 at init time.
            'potentialPct': 0.5,
            # Synapses with permanence above this threshold are "connected"
            # and can contribute to cell firing.
            'synPermConnected': 0.3,
            'synPermActiveInc': 0.005,
            'synPermInactiveDec': 0.01,
        },

        # TM is required for temporal predictions (e.g. next-input
        # prediction); without it the model can only reconstruct missing
        # sensor inputs via the SP.
        'tmEnable': False,
        'tmParams': {
            # 0 silent; [1..6] increasing verbosity
            # (see backtracking_tm.py / backtracking_tm_cpp.py).
            'verbosity': 0,
            # Number of cortical columns (same for SP and TM).
            'columnCount': 2048,
            # Cells (states) allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal pooler implementation selector
            # (see _getTPClass in CLARegion.py).
            'temporalImp': 'cpp',
            # New synapse formation count; None means use
            # spNumActivePerInhArea.
            'newSynapseCount': 20,
            # Max synapses per segment (> 0 fixed-size CLA; -1 non-fixed).
            'maxSynapsesPerSegment': 32,
            # Max segments per cell (> 0 fixed-size CLA; -1 non-fixed).
            'maxSegmentsPerCell': 128,
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            # None defaults to the permanenceInc value.
            'permanenceDec': 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Min active synapses for a segment to be considered while
            # searching for best-matching segments.
            'minThreshold': 12,
            # A segment is active when >= this many connected synapses are
            # active due to infActiveState.
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length: how many new elements the TM
            # appends to a learned sequence at a time. Smaller suits short
            # sequences; larger suits long ones.
            'pamLength': 1,
        },

        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName': 'SDRClassifierRegion',
            # 0 silent; [1..6] increasing verbosity.
            'verbosity': 0,
            # Learning/forgetting rate: higher adapts and forgets faster.
            'alpha': 0.001,
            # Overwritten after updateConfigFromSubConfig, computed from
            # aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary

# Let a sub-experiment (if one imported this base description) override any
# of the config fields above.
updateConfigFromSubConfig(config)

# Compute predictionSteps from predictAheadTime and the aggregation period;
# either value may be permuted over by a sub-experiment.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
    config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve ValueGetterBase-derived futures (e.g. DeferredDictLookup) embedded
# in the config. NOTE: this MUST be called after updateConfigFromSubConfig()
# so that value-getter-based substitutions from the sub-experiment (if any)
# are honored.
applyValueGettersToContainer(config)
# [optional] Task list consumed by OPF clients that drive the model through
# OPFTaskDriver (per opfTaskSchema.json). Clients that talk to OPFExperiment
# directly ignore this section. Each task pairs a label, an input stream
# spec, an iteration budget, and a task-control spec.
control = {
    'environment': 'opfExperiment',
    'tasks': [
        {
            # Diagnostic label; may also be used to build task-specific
            # file and directory names.
            'taskLabel': "Anomaly",

            # Input stream spec per py/nupic/cluster/database/StreamDef.json.
            'dataset': {
                'info': 'test_NoProviders',
                'version': 1,
                'streams': [
                    {
                        'columns': ['*'],
                        'info': 'my simple dataset',
                        'source': 'file://' + os.path.join(
                            os.path.dirname(__file__), 'data.csv'),
                    },
                ],
                # TODO: Aggregation is not supported yet by run_opf_experiment.py
                #'aggregation' : config['aggregationInfo']
            },

            # Maximum number of iterations; one iteration consumes one
            # (possibly aggregated) record. -1 iterates the whole dataset;
            # otherwise the task stops at this count or at end of data,
            # whichever comes first.
            'iterationCount': -1,

            # OPFTaskDriver parameters (per opfTaskControlSchema.json).
            'taskControl': {
                # Cycle of opf_task_driver.IterationPhaseSpecXXXXX phases.
                'iterationCycle': [
                    #IterationPhaseSpecLearnOnly(1000),
                    IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
                    #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
                ],
                'metrics': [],
                # Regexes selecting which auto-generated metric labels (from
                # the Inference Specifications section) MUST be logged with
                # every prediction — analogous to how the optimization metric
                # is named in permutations.py.
                'loggedMetrics': ['.*nupicScore.*'],
                # Optional research callbacks; each receives the
                # OPFExperiment instance and returns nothing.
                'callbacks': {
                    'setup': [],     # before the task's first iteration
                    'postIter': [],  # after every learn/infer iteration
                    'finish': [],    # once the task completes
                },
            },  # end taskControl
        },  # end task
    ],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,809 | Python | .py | 336 | 38.5625 | 110 | 0.647131 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,812 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/1field_few_skewed/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model these parameters apply to.
    'model': "HTMPrediction",

    # Format version of this config.
    'version': 1,

    # Intermediate values used to compute modelParams fields; also referenced
    # from the control section.
    'aggregationInfo': {
        'fields': [
            ('numericFieldNameA', 'mean'),
            ('numericFieldNameB', 'sum'),
            ('categoryFieldNameC', 'first'),
        ],
        'hours': 0,
    },
    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference this model performs.
        'inferenceType': 'NontemporalAnomaly',

        'sensorParams': {
            # Sensor region verbosity: 0 silent; >=1 some info; >=2 more;
            # >=3 even more (see compute() in py/regions/RecordSensor.py).
            'verbosity': 0,

            # Field encoders (generated from DS_ENCODER_SCHEMA).
            'encoders': {
                'f0': {'fieldname': 'f0', 'n': 100, 'name': 'f0',
                       'type': 'SDRCategoryEncoder', 'w': 21},
            },

            # Period for automatically generated resets from a RecordSensor;
            # None (or all-zero values) disables them. Valid keys: days,
            # hours, minutes, seconds, milliseconds, microseconds, weeks;
            # e.g. dict(days=1, hours=12) for 1.5 days.
            'sensorAutoReset': None,
        },

        'spEnable': True,
        'spParams': {
            # SP verbosity: 0 silent; >=1 some info; >=2 more info.
            'spVerbosity': 0,
            'globalInhibition': 1,
            # Number of cortical columns (same for SP and TM).
            'columnCount': 2048,
            'inputWidth': 0,
            # Max active columns in the SP output; weaker ones beyond this
            # are suppressed by inhibition.
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # Fraction of a column's receptive field available for potential
            # synapses: potentialPct * (2*potentialRadius+1)^2 at init time.
            'potentialPct': 0.5,
            # Synapses with permanence above this threshold are "connected"
            # and can contribute to cell firing.
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },

        # TM is required for temporal predictions (e.g. next-input
        # prediction); without it the model can only reconstruct missing
        # sensor inputs via the SP.
        'tmEnable': True,
        'tmParams': {
            # 0 silent; [1..6] increasing verbosity
            # (see backtracking_tm.py / backtracking_tm_cpp.py).
            'verbosity': 0,
            # Number of cortical columns (same for SP and TM).
            'columnCount': 2048,
            # Cells (states) allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal pooler implementation selector
            # (see _getTPClass in CLARegion.py).
            'temporalImp': 'cpp',
            # New synapse formation count; None means use
            # spNumActivePerInhArea.
            'newSynapseCount': 20,
            # Max synapses per segment (> 0 fixed-size CLA; -1 non-fixed).
            'maxSynapsesPerSegment': 32,
            # Max segments per cell (> 0 fixed-size CLA; -1 non-fixed).
            'maxSegmentsPerCell': 128,
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            # None defaults to the permanenceInc value.
            'permanenceDec': 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Min active synapses for a segment to be considered while
            # searching for best-matching segments.
            'minThreshold': 12,
            # A segment is active when >= this many connected synapses are
            # active due to infActiveState.
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length: how many new elements the TM
            # appends to a learned sequence at a time. Smaller suits short
            # sequences; larger suits long ones.
            'pamLength': 1,
        },

        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName': 'SDRClassifierRegion',
            # 0 silent; [1..6] increasing verbosity.
            'verbosity': 0,
            # Learning/forgetting rate: higher adapts and forgets faster.
            'alpha': 0.001,
            # Overwritten after updateConfigFromSubConfig, computed from
            # aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary

# Let a sub-experiment (if one imported this base description) override any
# of the config fields above.
updateConfigFromSubConfig(config)

# Compute predictionSteps from predictAheadTime and the aggregation period;
# either value may be permuted over by a sub-experiment.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
    config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve ValueGetterBase-derived futures (e.g. DeferredDictLookup) embedded
# in the config. NOTE: this MUST be called after updateConfigFromSubConfig()
# so that value-getter-based substitutions from the sub-experiment (if any)
# are honored.
applyValueGettersToContainer(config)
# [optional] Task list consumed by OPF clients that drive the model through
# OPFTaskDriver (per opfTaskSchema.json). Clients that talk to OPFExperiment
# directly ignore this section.
#
# NOTE(review): 'os' is not imported explicitly in this script; it is only
# available via the star import above — confirm, or add 'import os' at the top.
control = {
    'environment': 'opfExperiment',
    'tasks': [
        {
            # Diagnostic label; may also be used to build task-specific
            # file and directory names.
            'taskLabel': "Anomaly",

            # Input stream spec per py/nupic/cluster/database/StreamDef.json.
            'dataset': {
                'info': 'test_NoProviders',
                'version': 1,
                'streams': [
                    {
                        'columns': ['*'],
                        'info': 'my simple dataset',
                        'source': 'file://' + os.path.join(
                            os.path.dirname(__file__), 'data.csv'),
                    },
                ],
                # TODO: Aggregation is not supported yet by run_opf_experiment.py
                #'aggregation' : config['aggregationInfo']
            },

            # Maximum number of iterations; one iteration consumes one
            # (possibly aggregated) record. -1 iterates the whole dataset;
            # otherwise the task stops at this count or at end of data,
            # whichever comes first.
            'iterationCount': -1,

            # OPFTaskDriver parameters (per opfTaskControlSchema.json).
            'taskControl': {
                # Cycle of opf_task_driver.IterationPhaseSpecXXXXX phases.
                'iterationCycle': [
                    #IterationPhaseSpecLearnOnly(1000),
                    IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
                    #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
                ],
                'metrics': [],
                # Regexes selecting which auto-generated metric labels (from
                # the Inference Specifications section) MUST be logged with
                # every prediction — analogous to how the optimization metric
                # is named in permutations.py.
                'loggedMetrics': ['.*nupicScore.*'],
                # Optional research callbacks; each receives the
                # OPFExperiment instance and returns nothing.
                'callbacks': {
                    'setup': [],     # before the task's first iteration
                    'postIter': [],  # after every learn/infer iteration
                    'finish': [],    # once the task completes
                },
            },  # end taskControl
        },  # end task
    ],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,713 | Python | .py | 335 | 38.435821 | 110 | 0.647555 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,813 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/10field_many_skewed/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
  updateConfigFromSubConfig,
  applyValueGettersToContainer,
  DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_task_driver import (
                            IterationPhaseSpecLearnOnly,
                            IterationPhaseSpecInferOnly,
                            IterationPhaseSpecLearnAndInfer)
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
                   ('numericFieldNameB', 'sum'),
                   ('categoryFieldNameC', 'first')],
       'hours': 0},
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'NontemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            #   dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #     ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
            },
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys are any combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },
        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,
        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName' : 'SDRClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  # Validate with an explicit raise rather than `assert`: asserts are
  # stripped when Python runs with -O, which would let a bad
  # predictAheadTime silently write a non-positive step count into the
  # classifier params.
  if predictionSteps < 1:
    raise ValueError(
        "Invalid predictAheadTime %r: computed predictionSteps=%d (must "
        "be >= 1)" % (config['predictAheadTime'], predictionSteps))
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
# Experiment control specification: a single "Anomaly" task that streams
# data.csv through the model, learning and inferring on every record.
control = dict(
  environment='opfExperiment',
  tasks = [
    {
      # Task label; this label string may be used for diagnostic logging and for
      # constructing filenames or directory pathnames for task-specific files, etc.
      'taskLabel' : "Anomaly",
      # Input stream specification per py/nupic/cluster/database/StreamDef.json.
      #
      'dataset' : {
        'info': 'test_NoProviders',
        'version': 1,
        'streams': [
          {
            'columns': ['*'],
            'info': 'my simple dataset',
            # Dataset path is resolved relative to this description.py file.
            'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
          }
        ],
        # TODO: Aggregation is not supported yet by run_opf_experiment.py
        #'aggregation' : config['aggregationInfo']
      },
      # Iteration count: maximum number of iterations. Each iteration corresponds
      # to one record from the (possibly aggregated) dataset. The task is
      # terminated when either number of iterations reaches iterationCount or
      # all records in the (possibly aggregated) database have been processed,
      # whichever occurs first.
      #
      # iterationCount of -1 = iterate over the entire dataset
      'iterationCount' : -1,
      # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
      'taskControl' : {
        # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
        # instances.
        'iterationCycle' : [
          #IterationPhaseSpecLearnOnly(1000),
          IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
          #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
        ],
        'metrics' : [
        ],
        # Logged Metrics: A sequence of regular expressions that specify which of
        # the metrics from the Inference Specifications section MUST be logged for
        # every prediction. The regex's correspond to the automatically generated
        # metric labels. This is similar to the way the optimization metric is
        # specified in permutations.py.
        'loggedMetrics': ['.*nupicScore.*'],
        # Callbacks for experimentation/research (optional)
        'callbacks' : {
          # Callbacks to be called at the beginning of a task, before model iterations.
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          # 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
          # 'setup' : [htmPredictionModelControlDisableTPLearningCb],
          'setup' : [],
          # Callbacks to be called after every learning/inference iteration
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'postIter' : [],
          # Callbacks to be called when the experiment task is finished
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'finish' : []
        }
      } # End of taskControl
    }, # End of task
  ]
)
# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,713 | Python | .py | 335 | 38.435821 | 110 | 0.647555 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,814 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
  updateConfigFromSubConfig,
  applyValueGettersToContainer,
  DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_task_driver import (
                            IterationPhaseSpecLearnOnly,
                            IterationPhaseSpecInferOnly,
                            IterationPhaseSpecLearnAndInfer)
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
                   ('numericFieldNameB', 'sum'),
                   ('categoryFieldNameC', 'first')],
       'hours': 0},
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'NontemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            #   dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #     ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
                'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
            },
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys are any combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },
        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,
        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName' : 'SDRClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  # Validate with an explicit raise rather than `assert`: asserts are
  # stripped when Python runs with -O, which would let a bad
  # predictAheadTime silently write a non-positive step count into the
  # classifier params.
  if predictionSteps < 1:
    raise ValueError(
        "Invalid predictAheadTime %r: computed predictionSteps=%d (must "
        "be >= 1)" % (config['predictAheadTime'], predictionSteps))
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
# Experiment control specification: a single "Anomaly" task that streams
# data.csv through the model, learning and inferring on every record.
control = dict(
  environment='opfExperiment',
  tasks = [
    {
      # Task label; this label string may be used for diagnostic logging and for
      # constructing filenames or directory pathnames for task-specific files, etc.
      'taskLabel' : "Anomaly",
      # Input stream specification per py/nupic/cluster/database/StreamDef.json.
      #
      'dataset' : {
        'info': 'test_NoProviders',
        'version': 1,
        'streams': [
          {
            'columns': ['*'],
            'info': 'my simple dataset',
            # Dataset path is resolved relative to this description.py file.
            'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
          }
        ],
        # TODO: Aggregation is not supported yet by run_opf_experiment.py
        #'aggregation' : config['aggregationInfo']
      },
      # Iteration count: maximum number of iterations. Each iteration corresponds
      # to one record from the (possibly aggregated) dataset. The task is
      # terminated when either number of iterations reaches iterationCount or
      # all records in the (possibly aggregated) database have been processed,
      # whichever occurs first.
      #
      # iterationCount of -1 = iterate over the entire dataset
      'iterationCount' : -1,
      # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
      'taskControl' : {
        # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
        # instances.
        'iterationCycle' : [
          #IterationPhaseSpecLearnOnly(1000),
          IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
          #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
        ],
        'metrics' : [
        ],
        # Logged Metrics: A sequence of regular expressions that specify which of
        # the metrics from the Inference Specifications section MUST be logged for
        # every prediction. The regex's correspond to the automatically generated
        # metric labels. This is similar to the way the optimization metric is
        # specified in permutations.py.
        'loggedMetrics': ['.*nupicScore.*'],
        # Callbacks for experimentation/research (optional)
        'callbacks' : {
          # Callbacks to be called at the beginning of a task, before model iterations.
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          # 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
          # 'setup' : [htmPredictionModelControlDisableTPLearningCb],
          'setup' : [],
          # Callbacks to be called after every learning/inference iteration
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'postIter' : [],
          # Callbacks to be called when the experiment task is finished
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'finish' : []
        }
      } # End of taskControl
    }, # End of task
  ]
)
# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,806 | Python | .py | 336 | 38.553571 | 110 | 0.647063 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,815 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/spatial/1field_few_balanced/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary

# Fold any sub-experiment overrides into the base config before it is used.
updateConfigFromSubConfig(config)

# When predictAheadTime is supplied, convert it into a whole number of
# aggregation periods and use that as the classifier step count; both
# quantities may be permuted over by sub-experiments.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(
      aggregationDivide(config['predictAheadTime'],
                        config['aggregationInfo'])))
  assert predictionSteps >= 1
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve ValueGetterBase-derived futures; this MUST run after
# updateConfigFromSubConfig() so that value-getter substitutions from the
# sub-experiment (if any) are honored.
applyValueGettersToContainer(config)
# [optional] Task list consumed by OPFTaskDriver-based clients; clients that
# drive OPFExperiment directly ignore it. Each task bundles a label, an input
# spec, an iteration count, and a task-control spec (per opfTaskSchema.json).
control = {
  'environment': 'opfExperiment',
  'tasks': [
    {
      # Diagnostic label; also used to build task-specific file/dir names.
      'taskLabel': "Anomaly",

      # Input stream spec per py/nupic/cluster/database/StreamDef.json.
      'dataset': {
        'info': 'test_NoProviders',
        'version': 1,
        'streams': [
          {
            'columns': ['*'],
            'info': 'my simple dataset',
            'source': 'file://' + os.path.join(os.path.dirname(__file__),
                                               'data.csv'),
          },
        ],
        # TODO: Aggregation is not supported yet by run_opf_experiment.py
        #'aggregation' : config['aggregationInfo']
      },

      # -1 iterates over the entire (possibly aggregated) dataset; otherwise
      # the task stops after this many records, whichever comes first.
      'iterationCount': -1,

      # Task control parameters for OPFTaskDriver
      # (per opfTaskControlSchema.json).
      'taskControl': {
        # Iteration cycle: opf_task_driver.IterationPhaseSpecXXXXX instances.
        'iterationCycle': [
          #IterationPhaseSpecLearnOnly(1000),
          IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
          #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
        ],
        'metrics': [
        ],
        # Regexes selecting which metric labels MUST be logged for every
        # prediction (mirrors how permutations.py names its optimization
        # metric).
        'loggedMetrics': ['.*nupicScore.*'],
        # Research/experimentation callbacks (optional); every callback
        # receives the running OPFExperiment and returns nothing.
        'callbacks': {
          'setup': [],     # before the first model iteration
          'postIter': [],  # after every learn/infer iteration
          'finish': []     # once the task completes
        }
      }  # End of taskControl
    },  # End of task
  ],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
# ======================================================================
# File boundary (extraction artifact): the text below is
# examples/opf/experiments/anomaly/temporal/simple/description.py
# ======================================================================
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/current/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
  updateConfigFromSubConfig,
  applyValueGettersToContainer,
  DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
  IterationPhaseSpecLearnOnly,
  IterationPhaseSpecInferOnly,
  IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    # All-zero values -> no aggregation is applied to the input stream.
    'aggregationInfo': { 'days': 0,
                         'fields': [],
                         'hours': 0,
                         'microseconds': 0,
                         'milliseconds': 0,
                         'minutes': 0,
                         'months': 0,
                         'seconds': 0,
                         'weeks': 0,
                         'years': 0},
    # None -> clParams['steps'] below is used as-is (see the predictAheadTime
    # logic that runs right after this dictionary).
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            #   dsEncoderSchema = [
            #      DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            # Single scalar encoder for input field 'f': values clipped to
            # [0, 5], encoded into 100 bits with 21 active.
            'encoders': {
                u'f': {
                    'clipInput': True,
                    'fieldname': u'f',
                    'n': 100,
                    'name': u'f',
                    'minval': 0,
                    'maxval': 5,
                    'type': 'ScalarEncoder',
                    'w': 21},
            },
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys are the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            'spatialImp' : 'cpp',
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.05,
            'synPermInactiveDec': 0.008,
            # boostStrength controls the strength of boosting. It should be
            # a number greater than or equal to 0.0. No boosting is applied if
            # boostStrength=0.0. Boosting encourages efficient usage of columns.
            'boostStrength': 0.0,
        },
        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable': True,
        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName' : 'SDRClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary

# Apply sub-experiment overrides (no-op when run stand-alone).
updateConfigFromSubConfig(config)

# Derive the number of prediction steps from predictAheadTime and the
# aggregation period (both may be permuted over), then hand it to the
# classifier as a string.
if config['predictAheadTime'] is not None:
  stepCount = aggregationDivide(config['predictAheadTime'],
                                config['aggregationInfo'])
  predictionSteps = int(round(stepCount))
  assert predictionSteps >= 1
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve deferred value-getters; MUST follow updateConfigFromSubConfig()
# so substitutions from the sub-experiment (if any) take effect.
applyValueGettersToContainer(config)
# Experiment control: where the input comes from, how many records to run,
# what to predict, and which metrics to compute and log.
control = {
  # The environment that the current model is being run in
  "environment": 'nupic',

  # Input stream specification per py/nupic/cluster/database/StreamDef.json.
  'dataset': {
    u'info': u'cerebro_dummy',
    u'streams': [
      {
        u'columns': [u'*'],
        u'info': u'hotGym.csv',
        u'source': u'file://' + os.path.join(os.path.dirname(__file__),
                                             'data.csv'),
      },
    ],
    u'version': 1,
  },

  # Maximum number of iterations (one record each); -1 means consume the
  # entire (possibly aggregated) dataset.
  'iterationCount': -1,

  # Supplementary inference parameters: predict field 'f', one step ahead.
  "inferenceArgs": {u'predictedField': u'f', u'predictionSteps': [1]},

  # Metrics computed for this experiment: pass the model's anomaly score
  # through, windowed over the last 1000 records.
  'metrics': [
    MetricSpec(field=u'f', metric='passThruPrediction',
               inferenceElement='anomalyScore', params={'window': 1000}),
  ],

  # Log every generated metric label (regex matches all).
  'loggedMetrics': ['.*'],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
# ======================================================================
# File boundary (extraction artifact): the text below is
# examples/opf/experiments/anomaly/temporal/saw_200/description.py
# ======================================================================
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    # All-zero values -> no aggregation is applied to the input stream.
    'aggregationInfo': { 'days': 0,
                         'fields': [],
                         'hours': 0,
                         'microseconds': 0,
                         'milliseconds': 0,
                         'minutes': 0,
                         'months': 0,
                         'seconds': 0,
                         'weeks': 0,
                         'years': 0},
    # None -> clParams['steps'] below is used as-is (see the predictAheadTime
    # logic that runs right after this dictionary).
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            #   dsEncoderSchema = [
            #      DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            # Single scalar encoder for input field 'f': values clipped to
            # [0, 200], encoded into 513 bits with 21 active.
            'encoders': { 'f': { 'clipInput': True,
                                 'fieldname': u'f',
                                 'maxval': 200,
                                 'minval': 0,
                                 'n': 513,
                                 'name': u'f',
                                 'type': 'ScalarEncoder',
                                 'w': 21}},
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys are the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },
        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,
        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName' : 'SDRClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary

# Pull in any sub-experiment overrides before the config is used anywhere.
updateConfigFromSubConfig(config)

# When predictAheadTime is given, convert it into a whole number of
# aggregation periods and hand that to the classifier as its step count.
# (Both quantities may be permuted over by sub-experiments.)
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(config['predictAheadTime'],
                                                config['aggregationInfo'])))
  assert predictionSteps >= 1
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve ValueGetterBase-derived futures; this MUST follow
# updateConfigFromSubConfig() so value-getter substitutions from the
# sub-experiment (if any) are applied.
applyValueGettersToContainer(config)
# `os` is used below to locate data.csv, but this module has no explicit
# `import os` in its import block — presumably it leaked in via the wildcard
# import of htm_prediction_model_callbacks above (TODO confirm). Import it
# explicitly so the module does not depend on another module's internals.
import os

# Experiment control section: where the input data lives, how long to run,
# what to predict, and which metrics to compute and log.
control = {
  # The environment that the current model is being run in
  "environment": 'nupic',

  # Input stream specification per py/nupic/cluster/database/StreamDef.json.
  'dataset' : { u'info': u'Artificial Data',
                u'streams': [ { u'columns': [u'*'],
                                u'info': u'blah',
                                u'source': u'file://' + os.path.join(
                                    os.path.dirname(__file__), 'data.csv'),
                              }
                            ],
                u'version': 1},

  # Maximum number of records to consume; -1 iterates over the entire
  # (possibly aggregated) dataset.
  'iterationCount' : -1,

  # Supplementary inference parameters: predict field 'f', one step ahead.
  "inferenceArgs": {u'predictedField': u'f', 'predictionSteps': [1]},

  # Metrics computed for this experiment: average absolute error ('aae') of
  # the one-step prediction over a 100-record window.
  'metrics': [
    MetricSpec(field=u'f', metric='aae', inferenceElement='prediction',
               params={'window': 100}),
  ],

  # Regexes choosing which metric labels MUST be logged for every prediction
  # (mirrors how permutations.py names its optimization metric).
  'loggedMetrics': ['.*nupicScore.*'],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
# ======================================================================
# File boundary (extraction artifact): the text below is
# examples/opf/experiments/anomaly/temporal/saw_big/description.py
# ======================================================================
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
  updateConfigFromSubConfig,
  applyValueGettersToContainer,
  DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
  IterationPhaseSpecLearnOnly,
  IterationPhaseSpecInferOnly,
  IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model configuration for the "saw_big" temporal-anomaly experiment.
# NOTE: any key in this dict may be overridden by a sub-experiment through
# updateConfigFromSubConfig() below, so values here are only defaults.
config = {
  # Type of model that the rest of these parameters apply to.
  'model': "HTMPrediction",

  # Version that specifies the format of the config.
  'version': 1,

  # Intermediate variables used to compute fields in modelParams and also
  # referenced from the control section.  All-zero aggregation info means
  # records are fed to the model unaggregated.
  'aggregationInfo': {'days': 0,
                      'fields': [],
                      'hours': 0,
                      'microseconds': 0,
                      'milliseconds': 0,
                      'minutes': 0,
                      'months': 0,
                      'seconds': 0,
                      'weeks': 0,
                      'years': 0},

  # When not None, converted below into clParams['steps'].
  'predictAheadTime': None,

  # Model parameter dictionary.
  'modelParams': {
    # The type of inference that this model will perform
    'inferenceType': 'TemporalAnomaly',

    'sensorParams': {
      # Sensor diagnostic output verbosity control;
      # if > 0: sensor region will print out on screen what it's sensing
      # at each step 0: silent; >=1: some info; >=2: more info;
      # >=3: even more info (see compute() in py/regions/RecordSensor.py)
      'verbosity': 0,

      # (value generated from DS_ENCODER_SCHEMA)
      # Single scalar field 'f'; the adaptive encoder re-scales its range
      # as data arrives, so minval/maxval are only initial estimates.
      'encoders': {'f': {'clipInput': True,
                         'fieldname': u'f',
                         'maxval': 100,
                         'minval': 0,
                         'n': 100,
                         'name': u'f',
                         'type': 'AdaptiveScalarEncoder',
                         'w': 21}},

      # A dictionary specifying the period for automatically-generated
      # resets from a RecordSensor;
      #
      # None = disable automatically-generated resets (also disabled if
      # all of the specified values evaluate to 0).
      # Valid keys is the desired combination of the following:
      # days, hours, minutes, seconds, milliseconds, microseconds, weeks
      #
      # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
      #
      # (value generated from SENSOR_AUTO_RESET)
      'sensorAutoReset' : None,
    },

    'spEnable': True,

    'spParams': {
      # SP diagnostic output verbosity control;
      # 0: silent; >=1: some info; >=2: more info;
      'spVerbosity' : 0,

      'globalInhibition': 1,

      # Number of cell columns in the cortical region (same number for
      # SP and TM)
      # (see also tpNCellsPerCol)
      'columnCount': 2048,

      'inputWidth': 0,

      # SP inhibition control (absolute value);
      # Maximum number of active columns in the SP region's output (when
      # there are more, the weaker ones are suppressed)
      'numActiveColumnsPerInhArea': 40,

      'seed': 1956,

      # potentialPct
      # What percent of the columns's receptive field is available
      # for potential synapses. At initialization time, we will
      # choose potentialPct * (2*potentialRadius+1)^2
      'potentialPct': 0.5,

      # The default connected threshold. Any synapse whose
      # permanence value is above the connected threshold is
      # a "connected synapse", meaning it can contribute to the
      # cell's firing. Typical value is 0.10. Cells whose activity
      # level before inhibition falls below minDutyCycleBeforeInh
      # will have their own internal synPermConnectedCell
      # threshold set below this default value.
      # (This concept applies to both SP and TM and so 'cells'
      # is correct here as opposed to 'columns')
      'synPermConnected': 0.1,

      'synPermActiveInc': 0.1,

      'synPermInactiveDec': 0.01,
    },

    # Controls whether TM is enabled or disabled;
    # TM is necessary for making temporal predictions, such as predicting
    # the next inputs. Without TM, the model is only capable of
    # reconstructing missing sensor inputs (via SP).
    'tmEnable' : True,

    'tmParams': {
      # TM diagnostic output verbosity control;
      # 0: silent; [1..6]: increasing levels of verbosity
      # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py
      # and backtracking_tm_cpp.py)
      'verbosity': 0,

      # Number of cell columns in the cortical region (same number for
      # SP and TM)
      # (see also tpNCellsPerCol)
      'columnCount': 2048,

      # The number of cells (i.e., states), allocated per column.
      'cellsPerColumn': 32,

      'inputWidth': 2048,

      'seed': 1960,

      # Temporal Pooler implementation selector (see _getTPClass in
      # CLARegion.py).
      'temporalImp': 'cpp',

      # New Synapse formation count
      # NOTE: If None, use spNumActivePerInhArea
      #
      # TODO: need better explanation
      'newSynapseCount': 20,

      # Maximum number of synapses per segment
      #  > 0 for fixed-size CLA
      # -1 for non-fixed-size CLA
      #
      # TODO: for Ron: once the appropriate value is placed in TM
      # constructor, see if we should eliminate this parameter from
      # description.py.
      'maxSynapsesPerSegment': 32,

      # Maximum number of segments per cell
      #  > 0 for fixed-size CLA
      # -1 for non-fixed-size CLA
      #
      # TODO: for Ron: once the appropriate value is placed in TM
      # constructor, see if we should eliminate this parameter from
      # description.py.
      'maxSegmentsPerCell': 128,

      # Initial Permanence
      # TODO: need better explanation
      'initialPerm': 0.21,

      # Permanence Increment
      'permanenceInc': 0.1,

      # Permanence Decrement
      # If set to None, will automatically default to tpPermanenceInc
      # value.
      'permanenceDec' : 0.1,

      'globalDecay': 0.0,

      'maxAge': 0,

      # Minimum number of active synapses for a segment to be considered
      # during search for the best-matching segments.
      # None=use default
      # Replaces: tpMinThreshold
      'minThreshold': 12,

      # Segment activation threshold.
      # A segment is active if it has >= tpSegmentActivationThreshold
      # connected synapses that are active due to infActiveState
      # None=use default
      # Replaces: tpActivationThreshold
      # NOTE(review): 16 is higher than the 12 used by the sibling hotgym
      # experiment -- presumably tuned for this dataset; confirm.
      'activationThreshold': 16,

      'outputType': 'normal',

      # "Pay Attention Mode" length. This tells the TM how many new
      # elements to append to the end of a learned sequence at a time.
      # Smaller values are better for datasets with short sequences,
      # higher values are better for datasets with long sequences.
      'pamLength': 1,
    },

    'clParams': {
      # Classifier implementation selection.
      'implementation': 'py',

      'regionName' : 'SDRClassifierRegion',

      # Classifier diagnostic output verbosity control;
      # 0: silent; [1..6]: increasing levels of verbosity
      'verbosity' : 0,

      # This controls how fast the classifier learns/forgets. Higher values
      # make it adapt faster and forget older patterns faster.
      'alpha': 0.001,

      # This is set after the call to updateConfigFromSubConfig and is
      # computed from the aggregationInfo and predictAheadTime; the value
      # here is only the default used when predictAheadTime is None.
      'steps': '1',
    },

    'trainSPNetOnlyIfRequested': False,
  },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
# Merge any sub-experiment overrides into the base dictionary.
updateConfigFromSubConfig(config)

# If a wall-clock prediction horizon was requested, convert it into a whole
# number of (aggregated) records and hand it to the classifier.
if config['predictAheadTime'] is not None:
  stepsAhead = int(round(aggregationDivide(config['predictAheadTime'],
                                           config['aggregationInfo'])))
  assert stepsAhead >= 1
  config['modelParams']['clParams']['steps'] = str(stepsAhead)

# Resolve deferred value-getters.  This MUST run after
# updateConfigFromSubConfig() so substitutions can reference values supplied
# by the sub-experiment (if any).
applyValueGettersToContainer(config)
# Experiment control section: data source, iteration limits, inference
# arguments and metrics for the saw_big run.
control = {
  # The environment that the current model is being run in
  "environment": 'nupic',

  # Input stream specification per py/nupic/cluster/database/StreamDef.json.
  # NOTE(review): `os` is not imported explicitly in this module; it appears
  # to reach the namespace via the wildcard callbacks import above --
  # confirm, or add an explicit `import os`.
  'dataset' : { u'info': u'Artificial Data',
                u'streams': [ { u'columns': [u'*'],
                                u'info': u'blah',
                                u'source': u'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
                              }],
                u'version': 1},

  # Iteration count: maximum number of iterations. Each iteration corresponds
  # to one record from the (possibly aggregated) dataset. The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,

  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'predictedField': u'f', 'predictionSteps': [1]},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment.  A single average-absolute-error metric
  # over a 1000-record window on the predicted field 'f'.
  'metrics':[
    MetricSpec(field=u'f', metric='aae', inferenceElement='prediction', params={'window': 1000}),
  ],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regex's correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*nupicScore.*'],
}

# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 14,330 | Python | .py | 306 | 37.784314 | 108 | 0.633125 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,819 | description.py | numenta_nupic-legacy/examples/opf/experiments/anomaly/temporal/hotgym/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model configuration for the hotgym temporal-anomaly experiment.
# NOTE: any key in this dict may be overridden by a sub-experiment through
# updateConfigFromSubConfig() below, so values here are only defaults.
config = {
  # Type of model that the rest of these parameters apply to.
  'model': "HTMPrediction",

  # Version that specifies the format of the config.
  'version': 1,

  # Intermediate variables used to compute fields in modelParams and also
  # referenced from the control section.  Records are aggregated into
  # one-hour buckets, summing the 'consumption' field.
  'aggregationInfo': {'days': 0,
                      'fields': [('consumption', 'sum')],
                      'hours': 1,
                      'microseconds': 0,
                      'milliseconds': 0,
                      'minutes': 0,
                      'months': 0,
                      'seconds': 0,
                      'weeks': 0,
                      'years': 0},

  # When not None, converted below into clParams['steps'].
  'predictAheadTime': None,

  # Model parameter dictionary.
  'modelParams': {
    # The type of inference that this model will perform
    'inferenceType': 'TemporalAnomaly',

    'sensorParams': {
      # Sensor diagnostic output verbosity control;
      # if > 0: sensor region will print out on screen what it's sensing
      # at each step 0: silent; >=1: some info; >=2: more info;
      # >=3: even more info (see compute() in py/regions/RecordSensor.py)
      'verbosity' : 0,

      # (value generated from DS_ENCODER_SCHEMA)
      # Adaptive scalar encoder for the predicted 'consumption' field plus
      # three date encoders derived from the 'timestamp' field.
      'encoders': { 'consumption': { 'clipInput': True,
                                     'fieldname': u'consumption',
                                     'n': 100,
                                     'name': u'consumption',
                                     'type': 'AdaptiveScalarEncoder',
                                     'w': 21},
                    'timestamp_dayOfWeek': { 'dayOfWeek': (21, 1),
                                             'fieldname': u'timestamp',
                                             'name': u'timestamp_dayOfWeek',
                                             'type': 'DateEncoder'},
                    'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                             'name': u'timestamp_timeOfDay',
                                             'timeOfDay': (21, 1),
                                             'type': 'DateEncoder'},
                    'timestamp_weekend': { 'fieldname': u'timestamp',
                                           'name': u'timestamp_weekend',
                                           'type': 'DateEncoder',
                                           'weekend': 21}},

      # A dictionary specifying the period for automatically-generated
      # resets from a RecordSensor;
      #
      # None = disable automatically-generated resets (also disabled if
      # all of the specified values evaluate to 0).
      # Valid keys is the desired combination of the following:
      # days, hours, minutes, seconds, milliseconds, microseconds, weeks
      #
      # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
      #
      # (value generated from SENSOR_AUTO_RESET)
      'sensorAutoReset' : None,
    },

    'spEnable': True,

    'spParams': {
      # SP diagnostic output verbosity control;
      # 0: silent; >=1: some info; >=2: more info;
      'spVerbosity' : 0,

      'globalInhibition': 1,

      # Number of cell columns in the cortical region (same number for
      # SP and TM)
      # (see also tpNCellsPerCol)
      'columnCount': 2048,

      'inputWidth': 0,

      # SP inhibition control (absolute value);
      # Maximum number of active columns in the SP region's output (when
      # there are more, the weaker ones are suppressed)
      'numActiveColumnsPerInhArea': 40,

      'seed': 1956,

      # potentialPct
      # What percent of the columns's receptive field is available
      # for potential synapses. At initialization time, we will
      # choose potentialPct * (2*potentialRadius+1)^2
      'potentialPct': 0.5,

      # The default connected threshold. Any synapse whose
      # permanence value is above the connected threshold is
      # a "connected synapse", meaning it can contribute to the
      # cell's firing. Typical value is 0.10. Cells whose activity
      # level before inhibition falls below minDutyCycleBeforeInh
      # will have their own internal synPermConnectedCell
      # threshold set below this default value.
      # (This concept applies to both SP and TM and so 'cells'
      # is correct here as opposed to 'columns')
      'synPermConnected': 0.1,

      'synPermActiveInc': 0.1,

      'synPermInactiveDec': 0.01,
    },

    # Controls whether TM is enabled or disabled;
    # TM is necessary for making temporal predictions, such as predicting
    # the next inputs. Without TM, the model is only capable of
    # reconstructing missing sensor inputs (via SP).
    'tmEnable' : True,

    'tmParams': {
      # TM diagnostic output verbosity control;
      # 0: silent; [1..6]: increasing levels of verbosity
      # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py
      # and backtracking_tm_cpp.py)
      'verbosity': 0,

      # Number of cell columns in the cortical region (same number for
      # SP and TM)
      # (see also tpNCellsPerCol)
      'columnCount': 2048,

      # The number of cells (i.e., states), allocated per column.
      'cellsPerColumn': 32,

      'inputWidth': 2048,

      'seed': 1960,

      # Temporal Pooler implementation selector (see _getTPClass in
      # CLARegion.py).
      'temporalImp': 'cpp',

      # New Synapse formation count
      # NOTE: If None, use spNumActivePerInhArea
      #
      # TODO: need better explanation
      'newSynapseCount': 20,

      # Maximum number of synapses per segment
      #  > 0 for fixed-size CLA
      # -1 for non-fixed-size CLA
      #
      # TODO: for Ron: once the appropriate value is placed in TM
      # constructor, see if we should eliminate this parameter from
      # description.py.
      'maxSynapsesPerSegment': 32,

      # Maximum number of segments per cell
      #  > 0 for fixed-size CLA
      # -1 for non-fixed-size CLA
      #
      # TODO: for Ron: once the appropriate value is placed in TM
      # constructor, see if we should eliminate this parameter from
      # description.py.
      'maxSegmentsPerCell': 128,

      # Initial Permanence
      # TODO: need better explanation
      'initialPerm': 0.21,

      # Permanence Increment
      'permanenceInc': 0.1,

      # Permanence Decrement
      # If set to None, will automatically default to tpPermanenceInc
      # value.
      'permanenceDec' : 0.1,

      'globalDecay': 0.0,

      'maxAge': 0,

      # Minimum number of active synapses for a segment to be considered
      # during search for the best-matching segments.
      # None=use default
      # Replaces: tpMinThreshold
      'minThreshold': 12,

      # Segment activation threshold.
      # A segment is active if it has >= tpSegmentActivationThreshold
      # connected synapses that are active due to infActiveState
      # None=use default
      # Replaces: tpActivationThreshold
      'activationThreshold': 12,

      'outputType': 'normal',

      # "Pay Attention Mode" length. This tells the TM how many new
      # elements to append to the end of a learned sequence at a time.
      # Smaller values are better for datasets with short sequences,
      # higher values are better for datasets with long sequences.
      'pamLength': 1,
    },

    'clParams': {
      # Classifier implementation selection.
      'implementation': 'py',

      'regionName' : 'SDRClassifierRegion',

      # Classifier diagnostic output verbosity control;
      # 0: silent; [1..6]: increasing levels of verbosity
      'verbosity' : 0,

      # This controls how fast the classifier learns/forgets. Higher values
      # make it adapt faster and forget older patterns faster.
      'alpha': 0.0001,

      # This is set after the call to updateConfigFromSubConfig and is
      # computed from the aggregationInfo and predictAheadTime; the value
      # here is only the default used when predictAheadTime is None.
      'steps': '1,5',
    },

    'anomalyParams': {
      'mode': 'likelihood',     # pure(=default) / weighted / likelihood
      'slidingWindowSize': 5,   # >=0 / None
    },

    'trainSPNetOnlyIfRequested': False,
  },

  # Experiment-level settings referenced by the control section below:
  # prediction horizons, the field being predicted, and how many input
  # records to read from the dataset.
  'predictionSteps': [1, 5],
  'predictedField': 'consumption',
  'numRecords': 4000,
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
# Fold sub-experiment overrides into the base configuration.
updateConfigFromSubConfig(config)

# Translate a wall-clock prediction horizon into a record count for the
# classifier, when one was supplied.
_predictAhead = config['predictAheadTime']
if _predictAhead is not None:
  _nSteps = int(round(aggregationDivide(_predictAhead,
                                        config['aggregationInfo'])))
  assert _nSteps >= 1
  config['modelParams']['clParams']['steps'] = str(_nSteps)

# Resolve any deferred value-getters; this must happen after the sub-config
# merge so substitutions can see sub-experiment-supplied values.
applyValueGettersToContainer(config)
# Experiment control section: data source, iteration limits, inference
# arguments and metrics for the hotgym run.
control = {
  # The environment that the current model is being run in
  "environment": 'nupic',

  # Input stream specification per py/nupic/cluster/database/StreamDef.json.
  # Reads the first config['numRecords'] rows of the bundled hotgym dataset
  # and aggregates them per config['aggregationInfo'] (hourly sums).
  'dataset' : {
    u'info': u'test_hotgym',
    u'streams': [ { u'columns': [u'*'],
                    u'info': u'hotGym.csv',
                    u'last_record': config['numRecords'],
                    u'source': u'file://extra/hotgym/hotgym.csv'}],
    'aggregation': config['aggregationInfo'],
    u'version': 1},

  # Iteration count: maximum number of iterations. Each iteration corresponds
  # to one record from the (possibly aggregated) dataset. The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,

  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{'predictedField': config['predictedField'],
                   'predictionSteps': config['predictionSteps']},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment.  Starts empty; the loop further down in
  # this file appends the multi-step prediction metrics.
  'metrics':[],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regex's correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*aae.*'],
}
# Attach the multi-step prediction metrics: for every prediction-step value
# we track both the model's multi-step predictions and a trivial
# (last-value) baseline, each scored with AAE and altMAPE over a
# 1000-record window.  The nested loops reproduce the original append
# order: (aae, multiStep), (aae, trivial), (altMAPE, multiStep),
# (altMAPE, trivial) for each step count.
for _steps in config['predictionSteps']:
  for _errorMetric in ('aae', 'altMAPE'):
    for _metric, _element in (('multiStep', 'multiStepBestPredictions'),
                              ('trivial', 'prediction')):
      control['metrics'].append(
          MetricSpec(field=config['predictedField'],
                     metric=_metric,
                     inferenceElement=_element,
                     params={'errorMetric': _errorMetric,
                             'window': 1000,
                             'steps': _steps}))

# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 16,456 | Python | .py | 342 | 37.94152 | 108 | 0.616943 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,820 | make_datasets.py | numenta_nupic-legacy/examples/opf/experiments/missing_record/make_datasets.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets for the multi-step prediction experiments
"""
import os
import random
import datetime
from optparse import OptionParser
from nupic.data.file_record_stream import FileRecordStream
def _generateSimple(filename="simple.csv", numSequences=1, elementsPerSeq=3,
                    numRepeats=10):
  """ Generate a simple dataset. This contains a bunch of non-overlapping
  sequences.

  At the end of the dataset, we introduce missing records (the second
  element of each sequence is skipped while the timestamp still advances)
  so that test code can insure that the model didn't get confused by them.

  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the
                  directory containing this script.
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  """

  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  print "Creating %s..." % (pathname)
  fields = [('timestamp', 'datetime', 'T'),
            ('field1', 'string', ''),
            ('field2', 'float', '')]
  outFile = FileRecordStream(pathname, write=True, fields=fields)

  # Create the sequences: sequence i holds the consecutive integers
  # [i*elementsPerSeq, (i+1)*elementsPerSeq), so sequences never overlap.
  sequences = []
  for i in range(numSequences):
    seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
    sequences.append(seq)

  # Build a shuffled schedule with numRepeats occurrences of each sequence.
  seqIdxs = []
  for i in range(numRepeats):
    seqIdxs += range(numSequences)
  random.shuffle(seqIdxs)

  # Put 1 hour between each record
  timestamp = datetime.datetime(year=2012, month=1, day=1, hour=0, minute=0,
                                second=0)
  timeDelta = datetime.timedelta(hours=1)

  # Write out the sequences without missing records
  for seqIdx in seqIdxs:
    seq = sequences[seqIdx]
    for x in seq:
      outFile.appendRecord([timestamp, str(x), x])
      timestamp += timeDelta

  # Now, write some out with missing records: element index 1 of each
  # sequence is dropped, but the timestamp still advances, leaving a gap.
  # NOTE(review): this identical loop appears twice below, producing two
  # full passes of missing-record data -- presumably intentional (more
  # corrupted data), but confirm it is not an accidental copy/paste.
  for seqIdx in seqIdxs:
    seq = sequences[seqIdx]
    for i,x in enumerate(seq):
      if i != 1:
        outFile.appendRecord([timestamp, str(x), x])
      timestamp += timeDelta

  for seqIdx in seqIdxs:
    seq = sequences[seqIdx]
    for i,x in enumerate(seq):
      if i != 1:
        outFile.appendRecord([timestamp, str(x), x])
      timestamp += timeDelta

  # Write out some more of the sequences *without* missing records
  for seqIdx in seqIdxs:
    seq = sequences[seqIdx]
    for x in seq:
      outFile.appendRecord([timestamp, str(x), x])
      timestamp += timeDelta

  outFile.close()
if __name__ == '__main__':
  helpString = \
  """%prog [options]
  Generate artificial datasets for testing multi-step prediction """

  # Command-line handling: the script accepts options only, no positional
  # arguments.
  parser = OptionParser(helpString)
  parser.add_option(
      "--verbosity", default=0, type="int",
      help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
  options, positionalArgs = parser.parse_args()
  if positionalArgs:
    parser.error("No arguments accepted")

  # Fixed seed keeps the generated datasets reproducible across runs.
  random.seed(42)

  # Make sure the 'datasets' output directory exists next to this script.
  outputDir = os.path.join(os.path.dirname(__file__), 'datasets')
  if not os.path.exists(outputDir):
    os.mkdir(outputDir)

  # Generate the sample dataset.
  _generateSimple('simple_0.csv', numSequences=1, elementsPerSeq=3,
                  numRepeats=10)
| 4,638 | Python | .py | 112 | 36.375 | 80 | 0.66194 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,821 | description.py | numenta_nupic-legacy/examples/opf/experiments/missing_record/simple_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides layered on top of ../base/description.py.
config = {
    'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                           '../datasets/simple_0.csv'),
    # Window (in records) over which error metrics are computed.
    'windowSize': 25,
    'modelParams': {
        'sensorParams': {
            'verbosity': 0,
            # Override these encoders to None, presumably removing them
            # from the base description's encoder set -- confirm against
            # ../base/description.py.
            'encoders': {
                'timestamp_timeOfDay': None,
                'timestamp_dayOfWeek': None,
                'field2': None,
            },
        },
        'clParams': {
            'verbosity': 0,
        },
    },
}

# Load the base experiment with these overrides and re-export every symbol
# it defines (at module scope, locals() is the module namespace).
_baseModule = importBaseDescription('../base/description.py', config)
locals().update(_baseModule.__dict__)
| 1,677 | Python | .py | 45 | 33.644444 | 78 | 0.64413 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,822 | description.py | numenta_nupic-legacy/examples/opf/experiments/missing_record/base/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [('timestamp', 'first'),
('field1', 'mode'),
('field2', 'mean')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_dayOfWeek': {
'dayOfWeek': (21, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'field1': { 'fieldname': u'field1',
'n': 100,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
'field2': { 'clipInput': True,
'fieldname': u'field2',
'maxval': 50,
'minval': 0,
'n': 500,
'name': u'field2',
'type': 'ScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
# boostStrength controls the strength of boosting. It should be a
# a number greater or equal than 0.0. No boosting is applied if
# boostStrength=0.0. Boosting encourages efficient usage of columns.
'boostStrength': 10.0,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : False,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 16,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
'predictionSteps': [1],
'predictedField': 'field1',
'dataSource': 'fillInBySubExperiment',
'windowSize': 200,
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Fill in classifier steps
config['modelParams']['clParams']['steps'] = '%s' % \
(','.join([str(x) for x in config['predictionSteps']]))
# If the predicted field is field1 (category), use avg_err else if field 2
# (scalar) use aae as the metric
if config['predictedField'] == 'field1':
metricName = 'avg_err'
loggedMetrics = ['.*avg_err.*']
else:
metricName = 'aae'
loggedMetrics = ['.*aae.*']
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'missingRecord',
'streams': [ {
'columns': ['*'],
'info': 'missingRecord',
'source': config['dataSource'],
}],
'aggregation': config['aggregationInfo'],
'version': 1
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{'predictedField': config['predictedField'],
'predictionSteps': config['predictionSteps']},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=config['predictedField'], metric=metricName,
inferenceElement='prediction', params={
'window': config['windowSize']}),
MetricSpec(field=config['predictedField'], metric='trivial',
inferenceElement='prediction', params={'errorMetric': metricName,
'window': config['windowSize']}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': loggedMetrics,
}
# Add multi-step prediction metrics
for steps in config['predictionSteps']:
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': metricName,
'window': config['windowSize'],
'steps': steps}))
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 16,820 | Python | .py | 363 | 36.449036 | 108 | 0.615371 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,823 | base.py | numenta_nupic-legacy/examples/opf/experiments/opfrunexperiment_test/checkpoints/base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 1
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
'dataPath': None, # filled in by sub-experiment
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (os.path.abspath(config['dataPath'])),
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : 4000,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 14,780 | Python | .py | 320 | 37.878125 | 159 | 0.639358 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,824 | description.py | numenta_nupic-legacy/examples/opf/experiments/opfrunexperiment_test/checkpoints/a_plus_b/description.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'aggregationInfo' : {'seconds': 0, 'fields': [(u'c1', 'first'), (u'c0', 'first')], 'months': 0, 'days': 0, 'years': 0, 'hours': 1, 'microseconds': 0, 'weeks': 0, 'minutes': 0, 'milliseconds': 0},
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'inferenceType': 'NontemporalMultiStep', 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'dataPath': 'experiments/opfrunexperiment_test/checkpoints/data/a_plus_b.csv',
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,375 | Python | .py | 36 | 64.583333 | 452 | 0.636208 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,825 | description.py | numenta_nupic-legacy/examples/opf/experiments/opfrunexperiment_test/checkpoints/a/description.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'aggregationInfo' : {'seconds': 0, 'fields': [(u'c1', 'first'), (u'c0', 'first')], 'months': 0, 'days': 0, 'years': 0, 'hours': 1, 'microseconds': 0, 'weeks': 0, 'minutes': 0, 'milliseconds': 0},
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'inferenceType': 'NontemporalMultiStep', 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'dataPath': 'experiments/opfrunexperiment_test/checkpoints/data/a.csv',
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,368 | Python | .py | 36 | 64.388889 | 452 | 0.635972 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,826 | description.py | numenta_nupic-legacy/examples/opf/experiments/opfrunexperiment_test/checkpoints/b/description.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides applied on top of ../base.py. These values were
# emitted by the permutations runner; keep them numerically exact.
config = {
    # Aggregate input records into 1-hour buckets, keeping the first value
    # of each field seen within a bucket.
    'aggregationInfo': {
        'fields': [(u'c1', 'first'), (u'c0', 'first')],
        'years': 0,
        'months': 0,
        'weeks': 0,
        'days': 0,
        'hours': 1,
        'minutes': 0,
        'seconds': 0,
        'milliseconds': 0,
        'microseconds': 0,
    },
    'modelParams': {
        'inferenceType': 'NontemporalMultiStep',
        'sensorParams': {
            # The date sub-encoders for field c0 are disabled (None); only
            # the scalar field c1 is encoded.
            'encoders': {
                u'c0_timeOfDay': None,
                u'c0_dayOfWeek': None,
                u'c0_weekend': None,
                u'c1': {
                    'name': 'c1',
                    'fieldname': 'c1',
                    'type': 'AdaptiveScalarEncoder',
                    'n': 275,
                    'w': 21,
                    'clipInput': True,
                },
            },
        },
        'spParams': {'synPermInactiveDec': 0.052500000000000005},
        'tmParams': {
            'minThreshold': 11,
            'activationThreshold': 14,
            'pamLength': 3,
        },
        'clParams': {'alpha': 0.050050000000000004},
    },
    # Location of the (aggregated) input data for this sub-experiment.
    'dataPath': 'experiments/opfrunexperiment_test/checkpoints/data/b.csv',
}
# Apply the overrides above to the shared base description in ../base.py.
mod = importBaseDescription('../base.py', config)
# Re-export the merged description's attributes so this generated file is a
# drop-in description.py for the OPF experiment runner.
locals().update(mod.__dict__)
| 2,369 | Python | .py | 36 | 64.388889 | 452 | 0.635972 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,827 | description.py | numenta_nupic-legacy/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# HTMPrediction model configuration. A sub-experiment may override any of
# these values (see updateConfigFromSubConfig below); deferred value-getters
# are resolved after that merge via applyValueGettersToContainer.
config = {
    # Model type and config-format version.
    'model': "HTMPrediction",
    'version': 1,

    # Aggregate raw records into 1-hour buckets before they reach the model;
    # also referenced from the control section's dataset spec.
    'aggregationInfo': {
        'fields': [(u'timestamp', 'first'),
                   (u'gym', 'first'),
                   (u'consumption', 'sum')],
        'years': 0,
        'months': 0,
        'weeks': 0,
        'days': 0,
        'hours': 1,
        'minutes': 0,
        'seconds': 0,
        'milliseconds': 0,
        'microseconds': 0},

    # When not None, clParams['steps'] is recomputed from this value and the
    # aggregation period (see the post-processing code below).
    'predictAheadTime': None,

    'modelParams': {
        # Multi-step temporal prediction; horizons come from clParams['steps'].
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor verbosity: 0 is silent; larger values print what the
            # sensor region sees at each step.
            'verbosity': 1,

            # Field encoders: three date sub-encoders for the timestamp plus
            # an adaptive scalar encoder for the predicted field.
            'encoders': {
                u'timestamp_timeOfDay': {'fieldname': u'timestamp',
                                         'name': u'timestamp_timeOfDay',
                                         'timeOfDay': (21, 1),
                                         'type': 'DateEncoder'},
                u'timestamp_dayOfWeek': {'dayOfWeek': (21, 1),
                                         'fieldname': u'timestamp',
                                         'name': u'timestamp_dayOfWeek',
                                         'type': 'DateEncoder'},
                u'timestamp_weekend': {'fieldname': u'timestamp',
                                       'name': u'timestamp_weekend',
                                       'type': 'DateEncoder',
                                       'weekend': 21},
                u'consumption': {'clipInput': True,
                                 'fieldname': u'consumption',
                                 'n': 100,
                                 'name': u'consumption',
                                 'type': 'AdaptiveScalarEncoder',
                                 'w': 21},
            },

            # Automatically-generated reset period; all-zero values leave
            # auto-resets disabled.
            'sensorAutoReset': {u'days': 0, u'hours': 0},
        },

        'spEnable': True,
        'spParams': {
            # 0: silent; >=1 prints increasing amounts of SP diagnostics.
            'spVerbosity': 0,
            'globalInhibition': 1,
            # Column count is shared by the SP and TM regions.
            'columnCount': 2048,
            'inputWidth': 0,
            # Cap on simultaneously-active columns after inhibition.
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # Fraction of a column's receptive field available as potential
            # synapses at initialization.
            'potentialPct': 0.5,
            # Permanence threshold above which a synapse is "connected".
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },

        # TM must be enabled for temporal (multi-step) predictions.
        'tmEnable': True,
        'tmParams': {
            # 0: silent; [1..6] increasing verbosity.
            'verbosity': 0,
            'columnCount': 2048,
            # Cells (states) allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Use the C++ temporal-memory implementation.
            'temporalImp': 'cpp',
            'newSynapseCount': 20,
            # Positive values => fixed-size CLA; -1 => unbounded.
            'maxSynapsesPerSegment': 32,
            'maxSegmentsPerCell': 128,
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            'permanenceDec': 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum active synapses for the best-matching-segment search.
            'minThreshold': 12,
            # Connected active synapses needed for a segment to activate.
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay attention mode" length; small favors short sequences.
            'pamLength': 1,
        },

        'clParams': {
            'implementation': 'py',
            'regionName': 'SDRClassifierRegion',
            'verbosity': 0,
            # Classifier learning/forgetting rate.
            'alpha': 0.001,
            # Prediction horizons; overwritten when predictAheadTime is set.
            'steps': '1,5',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# --- Post-processing of the config dictionary ---
# Merge any sub-experiment overrides into the base config.
updateConfigFromSubConfig(config)

# If a prediction-ahead time is given, derive the number of prediction steps
# from it and the (possibly permuted) aggregation period, then overwrite the
# classifier's 'steps' setting accordingly.
if config['predictAheadTime'] is not None:
  predictionSteps = int(
      round(aggregationDivide(config['predictAheadTime'],
                              config['aggregationInfo'])))
  assert predictionSteps >= 1
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve deferred value-getters. This must run after the sub-config merge so
# that getter-based substitutions from the sub-experiment take effect.
applyValueGettersToContainer(config)
# Experiment control: data source, run length, and metrics to compute.
# Consumed by the nupic experiment runner.
control = {
    # Runtime environment for this model.
    "environment": 'nupic',

    # Input stream spec (see py/nupic/cluster/database/StreamDef.json);
    # reuses the aggregation period defined in the model config above.
    'dataset': {
        'aggregation': config['aggregationInfo'],
        u'info': u'test_hotgym',
        u'version': 1,
        u'streams': [{u'columns': [u'*'],
                      u'info': u'hotGym.csv',
                      u'last_record': 100,
                      u'source': u'file://extra/hotgym/hotgym.csv'}],
    },

    # -1 => iterate over every (aggregated) record in the dataset.
    'iterationCount': -1,

    # Supplementary inference parameters.
    "inferenceArgs": {u'predictedField': u'consumption',
                      u'predictionSteps': [1, 5]},

    # Metrics computed for this experiment.
    'metrics': [
        MetricSpec(field=u'consumption', metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={'window': 1000, 'steps': [1, 5],
                           'errorMetric': 'aae'}),
        MetricSpec(field=u'consumption', metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={'window': 1000, 'steps': [1, 5],
                           'errorMetric': 'altMAPE'}),
    ],

    # Regexes selecting which metric labels must be logged per prediction.
    'loggedMetrics': ['.*'],
}

# Hand the model configuration and control spec to the OPF framework.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,037 | Python | .py | 323 | 38.142415 | 170 | 0.639492 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,828 | description.py | numenta_nupic-legacy/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# HTMPrediction model configuration for the hotgym OPF test experiment.
# A sub-experiment may override any of these values via
# updateConfigFromSubConfig(); deferred value-getters are resolved afterwards.
config = {
    # Model type and config-format version.
    'model': "HTMPrediction",
    'version': 1,

    # Aggregation spec referenced from the control section. With hours=0 and
    # no other period keys, the period is effectively zero (no aggregation).
    'aggregationInfo': {
        'fields': [('numericFieldNameA', 'mean'),
                   ('numericFieldNameB', 'sum'),
                   ('categoryFieldNameC', 'first')],
        'hours': 0},

    # When not None, clParams['steps'] is recomputed from this value and the
    # aggregation period (see the post-processing code below).
    'predictAheadTime': None,

    'modelParams': {
        # Single-step temporal prediction.
        'inferenceType': 'TemporalNextStep',

        'sensorParams': {
            # Sensor verbosity: 0 is silent; larger values print what the
            # sensor region sees at each step.
            'verbosity': 0,

            # Field encoders: two date sub-encoders for the timestamp plus a
            # fixed-range scalar encoder for the predicted field.
            'encoders': {
                'timestamp_timeOfDay': {'fieldname': 'timestamp',
                                        'type': 'DateEncoder',
                                        'timeOfDay': (21, 8)},
                'timestamp_dayOfWeek': {'fieldname': 'timestamp',
                                        'type': 'DateEncoder',
                                        'dayOfWeek': (21, 3)},
                'consumption': {'fieldname': 'consumption',
                                'type': 'ScalarEncoder',
                                'name': 'consumption',
                                'minval': 0,
                                'maxval': 200,
                                'clipInput': True,
                                'n': 153,
                                'w': 21},
            },

            # None => no automatically-generated resets.
            'sensorAutoReset': None,
        },

        'spEnable': True,
        'spParams': {
            # 0: silent; >=1 prints increasing amounts of SP diagnostics.
            'spVerbosity': 0,
            'globalInhibition': 1,
            # Column count is shared by the SP and TM regions.
            'columnCount': 2048,
            'inputWidth': 0,
            # Cap on simultaneously-active columns after inhibition.
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # Fraction of a column's receptive field available as potential
            # synapses at initialization.
            'potentialPct': 0.5,
            # Permanence threshold above which a synapse is "connected".
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },

        # TM must be enabled for temporal (next-step) predictions.
        'tmEnable': True,
        'tmParams': {
            # 0: silent; [1..6] increasing verbosity.
            'verbosity': 0,
            'columnCount': 2048,
            # Cells (states) allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Use the C++ temporal-memory implementation.
            'temporalImp': 'cpp',
            'newSynapseCount': 20,
            # Positive values => fixed-size CLA; -1 => unbounded.
            'maxSynapsesPerSegment': 32,
            'maxSegmentsPerCell': 128,
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            'permanenceDec': 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum active synapses for the best-matching-segment search.
            'minThreshold': 12,
            # Connected active synapses needed for a segment to activate.
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay attention mode" length; small favors short sequences.
            'pamLength': 1,
        },

        'clParams': {
            'implementation': 'py',
            'regionName': 'SDRClassifierRegion',
            'verbosity': 0,
            # Classifier learning/forgetting rate.
            'alpha': 0.001,
            # Single-step horizon; overwritten when predictAheadTime is set.
            'steps': '1',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# --- Post-processing of the config dictionary ---
# Fold any sub-experiment overrides into the base config.
updateConfigFromSubConfig(config)

# When a prediction-ahead time is configured, convert it into a whole number
# of prediction steps using the (possibly permuted) aggregation period, and
# store the result as the classifier's 'steps' setting.
if config['predictAheadTime'] is not None:
  predictionSteps = int(
      round(aggregationDivide(config['predictAheadTime'],
                              config['aggregationInfo'])))
  assert predictionSteps >= 1
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve ValueGetterBase-derived futures. Must run after the sub-config
# merge so getter-based substitutions from the sub-experiment are honored.
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
# Task list for OPF clients that drive the model through OPFTaskDriver.
# (Clients that use OPFExperiment directly ignore the task specification.)
control = {
    'environment': 'opfExperiment',
    'tasks': [
        {
            # Label used in diagnostic logging and task-specific file names.
            'taskLabel': "OnlineLearning",

            # Input stream spec per py/nupic/cluster/database/StreamDef.json.
            # TODO: aggregation is not yet supported by run_opf_experiment.py,
            # so config['aggregationInfo'] is intentionally not wired in here.
            'dataset': {
                'info': 'test_NoProviders',
                'version': 1,
                'streams': [
                    {
                        'columns': ['*'],
                        'info': 'my gym.csv dataset',
                        'source': 'file://extra/gym/gym_melbourne.csv',
                        'first_record': 0,
                        'last_record': 4000,
                    },
                ],
            },

            # -1 => consume every record in the (possibly aggregated) dataset.
            'iterationCount': -1,

            # Task control parameters for OPFTaskDriver
            # (per opfTaskControlSchema.json).
            'taskControl': {
                # Iteration cycle: a single learn-and-infer phase. Learn-only
                # or infer-only phases (IterationPhaseSpecLearnOnly /
                # IterationPhaseSpecInferOnly) could be listed here instead.
                'iterationCycle': [
                    IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
                ],
                'metrics': [
                    MetricSpec(field=u'consumption',
                               inferenceElement='prediction',
                               metric='aae',
                               params={'window': 200}),
                ],
                # Regexes choosing which metric labels are logged for every
                # prediction (mirrors the optimization metric in
                # permutations.py).
                'loggedMetrics': ['.*nupicScore.*', ".*aae.*"],
                # Research hooks, each called with the OPFExperiment
                # instance: at task setup, after each iteration, and on
                # task completion.
                'callbacks': {
                    'setup': [],
                    'postIter': [],
                    'finish': [],
                },
            },
        },
    ],
}

# Hand the model configuration and control spec to the OPF framework.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 16,280 | Python | .py | 345 | 38.414493 | 110 | 0.643515 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,829 | description.py | numenta_nupic-legacy/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_no_agg/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model configuration (OPF HTMPrediction schema); values below may be
# overridden by a sub-experiment via updateConfigFromSubConfig().
config = {
    # Which OPF model class these parameters configure.
    'model': "HTMPrediction",

    # Format version of this configuration dictionary.
    'version': 1,

    # Aggregation settings; also read by the control section below.
    'aggregationInfo': {
        'fields': [(u'timestamp', 'first'),
                   (u'gym', 'first'),
                   (u'consumption', 'sum')],
        'days': 0,
        'hours': 0,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0,
    },

    # When not None, clParams['steps'] is recomputed from this value and the
    # aggregation period (see the code following this dictionary).
    'predictAheadTime': None,

    # Parameters for the model itself.
    'modelParams': {
        # The type of inference that this model will perform.
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor verbosity: 0 is silent; higher values print
            # progressively more detail about what is being sensed
            # (see compute() in py/regions/RecordSensor.py).
            'verbosity': 1,

            # Field encoders feeding the sensor region.
            'encoders': {
                u'timestamp_timeOfDay': {'fieldname': u'timestamp',
                                         'name': u'timestamp_timeOfDay',
                                         'timeOfDay': (21, 1),
                                         'type': 'DateEncoder'},
                u'timestamp_dayOfWeek': {'dayOfWeek': (21, 1),
                                         'fieldname': u'timestamp',
                                         'name': u'timestamp_dayOfWeek',
                                         'type': 'DateEncoder'},
                u'timestamp_weekend': {'fieldname': u'timestamp',
                                       'name': u'timestamp_weekend',
                                       'type': 'DateEncoder',
                                       'weekend': 21},
                u'consumption': {'clipInput': True,
                                 'fieldname': u'consumption',
                                 'n': 100,
                                 'name': u'consumption',
                                 'type': 'AdaptiveScalarEncoder',
                                 'w': 21},
            },

            # Period for automatically generated resets from the
            # RecordSensor; None (or all-zero values) disables them.
            # Example for 1.5 days: dict(days=1, hours=12)
            'sensorAutoReset': {u'days': 0, u'hours': 0},
        },

        'spEnable': True,

        'spParams': {
            # SP verbosity: 0 is silent; >=1 some info; >=2 more.
            'spVerbosity': 0,
            'globalInhibition': 1,
            # Number of cortical columns (shared by SP and TM).
            'columnCount': 2048,
            'inputWidth': 0,
            # Absolute cap on simultaneously active SP columns; weaker
            # columns beyond this count are suppressed.
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # Fraction of a column's receptive field available for
            # potential synapses at initialization time
            # (potentialPct * (2*potentialRadius+1)^2).
            'potentialPct': 0.5,
            # Permanence threshold above which a synapse is "connected"
            # and can contribute to a cell's firing.
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },

        # TM is required for temporal predictions (such as predicting the
        # next inputs); without it the model can only reconstruct missing
        # sensor inputs via the SP.
        'tmEnable': True,

        'tmParams': {
            # TM verbosity: 0 is silent; 1..6 increase detail (see
            # verbosity in backtracking_tm.py and backtracking_tm_cpp.py).
            'verbosity': 0,
            # Number of cortical columns (shared by SP and TM).
            'columnCount': 2048,
            # Cells (states) allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # Synapses added when a segment learns; if None, the SP's
            # numActiveColumnsPerInhArea value is used instead.
            'newSynapseCount': 20,
            # Fixed-size CLA limits: > 0 for fixed-size, -1 for unbounded.
            'maxSynapsesPerSegment': 32,
            'maxSegmentsPerCell': 128,
            # Initial permanence of new synapses.
            'initialPerm': 0.21,
            # Permanence increment on learning updates.
            'permanenceInc': 0.1,
            # Permanence decrement; None defaults to the increment value.
            'permanenceDec': 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum active synapses for a segment to be considered when
            # searching for the best-matching segments.
            'minThreshold': 12,
            # Connected synapses active due to infActiveState needed for a
            # segment to be active.
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay attention mode" length: how many new elements the TM
            # appends to a learned sequence at a time.  Smaller values suit
            # datasets with short sequences, larger values long ones.
            'pamLength': 1,
        },

        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName': 'SDRClassifierRegion',
            # Classifier verbosity: 0 is silent; 1..6 increase detail.
            'verbosity': 0,
            # Learning/forgetting rate; larger adapts (and forgets) faster.
            'alpha': 0.001,
            # Prediction step(s); recomputed after updateConfigFromSubConfig
            # when predictAheadTime is set.
            'steps': '1,5',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Merge any sub-experiment overrides into the base config dictionary.
updateConfigFromSubConfig(config)
# Compute predictionSteps from predictAheadTime and the aggregation period
# (which may be permuted over), overriding the classifier's 'steps' value.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  # Explicit check rather than assert: this validation must survive
  # "python -O", which strips assert statements.
  if predictionSteps < 1:
    raise ValueError(
        "predictAheadTime %r is shorter than the aggregation period; "
        "computed predictionSteps=%d" % (config['predictAheadTime'],
                                         predictionSteps))
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Resolve ValueGetterBase-derived futures.  NOTE: this MUST be called after
# updateConfigFromSubConfig() in order to support value-getter-based
# substitutions from the sub-experiment (if any).
applyValueGettersToContainer(config)
# Experiment control section: data stream, iteration budget, inference
# arguments, and the metrics computed while the experiment runs.
control = {
    # The environment that the current model is being run in.
    "environment": 'nupic',

    # Input stream spec per py/nupic/cluster/database/StreamDef.json.
    'dataset': {
        'aggregation': config['aggregationInfo'],
        u'info': u'test_hotgym',
        u'streams': [{u'columns': [u'*'],
                      u'info': u'hotGym.csv',
                      u'last_record': 100,
                      u'source': u'file://extra/hotgym/hotgym.csv'}],
        u'version': 1,
    },

    # Maximum number of (possibly aggregated) records to process; the task
    # stops at this count or at the end of the dataset, whichever comes
    # first.  -1 means iterate over the entire dataset.
    'iterationCount': -1,

    # Supplementary inference parameters.
    "inferenceArgs": {u'predictedField': u'consumption',
                      u'predictionSteps': [1, 5]},

    # MetricSpecs instantiating the metrics computed for this experiment.
    'metrics': [
        MetricSpec(field=u'consumption', metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={'window': 1000, 'steps': [1, 5],
                           'errorMetric': 'aae'}),
        MetricSpec(field=u'consumption', metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={'window': 1000, 'steps': [1, 5],
                           'errorMetric': 'altMAPE'}),
    ],

    # Regexes selecting which of the metric labels above MUST be logged
    # with every prediction (mirrors how the optimization metric is named
    # in permutations.py).
    'loggedMetrics': ['.*'],
}
# Description object consumed by the OPF experiment runner: bundles the
# model configuration and the control section into a single interface.
descriptionInterface = ExperimentDescriptionAPI(
    modelConfig=config, control=control)
| 15,037 | Python | .py | 323 | 38.142415 | 170 | 0.639492 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,830 | descriptionTemplate.py | numenta_nupic-legacy/examples/opf/experiments/template/base/descriptionTemplate.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script is a template for OPF description.py experiment description scripts.
Workflow: to create a new OPF description.py, start by branching this
template in source control. Branching via source control may make it easier to
integrate future template improvements into your description.py.
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.prediction_metrics_manager import MetricSpec
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer,
InferenceSpecNonTemporal,
InferenceSpecTemporal
)
# ------------------------------------------------------------------------------
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed between
# the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup. This facility is
# particularly handy for enabling substitution of values in the config
# dictionary from other values in the config dictionary, which is needed by
# permutation.py-based experiments. These values will be resolved during the
# call to applyValueGettersToContainer(), which we call after the base
# experiment's config dictionary is updated from the sub-experiment. See
# ValueGetterBase and DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to sub-experiment/permutation
# overrides, define a variable in this section, using key names beginning with a
# single underscore character to avoid collisions with pre-defined keys (e.g.,
# _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
#
config = {
    # Which OPF model class these parameters configure.
    'model': "HTMPrediction",

    ############################################################################
    # Dataset aggregation (training and inference datasets)
    ############################################################################
    # Time-based aggregation rules; see nupic.data.aggregator.generateDataset()
    # for usage details and additional options.  Aggregation begins at the
    # start of the dataset; records within each period are coalesced into a
    # single record.  It is disabled when every period unit evaluates to 0.
    #
    # Period units: [years months] OR
    # [weeks days hours minutes seconds milliseconds microseconds]
    # (years/months are mutually exclusive with the finer units).
    #
    # The period lives in its own key so it can be permuted over.
    '__aggregationPeriod': {'days': 0,
                            'hours': 0,
                            'microseconds': 0,
                            'milliseconds': 0,
                            'minutes': 0,
                            'months': 0,
                            'seconds': 0,
                            'weeks': 0,
                            'years': 0},

    # Aggregation spec proper: the period units are deferred lookups into
    # '__aggregationPeriod'; 'fields' maps field name -> aggregation function
    # name ("first", "last", "mean" or "sum" per nupic.data.aggregator).
    # The designated sequence-id, reset and timestamp fields are included
    # automatically when not listed.
    'aggregationInfo': {
        'years': DeferredDictLookup('__aggregationPeriod', 'years'),
        'months': DeferredDictLookup('__aggregationPeriod', 'months'),
        'weeks': DeferredDictLookup('__aggregationPeriod', 'weeks'),
        'days': DeferredDictLookup('__aggregationPeriod', 'days'),
        'hours': DeferredDictLookup('__aggregationPeriod', 'hours'),
        'minutes': DeferredDictLookup('__aggregationPeriod', 'minutes'),
        'seconds': DeferredDictLookup('__aggregationPeriod', 'seconds'),
        'milliseconds': DeferredDictLookup('__aggregationPeriod',
                                           'milliseconds'),
        'microseconds': DeferredDictLookup('__aggregationPeriod',
                                           'microseconds'),
        'fields': [
            # fieldname : aggregation function name
            ('numericFieldNameA', 'mean'),
            ('numericFieldNameB', 'sum'),
            ('categoryFieldNameC', 'first'),
        ],
    },

    ############################################################################
    # Sensor region
    ############################################################################
    # Sensor verbosity: 0 is silent; >=1 some info; >=2 more; >=3 even more
    # (see compute() in py/regions/RecordSensor.py).
    'sensorVerbosity': 0,

    # Period for automatically generated resets from a RecordSensor.  None
    # (or all-zero values) disables them.  Valid keys are any combination of:
    # days, hours, minutes, seconds, milliseconds, microseconds, weeks.
    # Example for 1.5 days: sensorAutoReset = dict(days=1, hours=12)
    'sensorAutoReset': None,

    # Field-encoder schema for the dataset encoder.  Each entry follows
    # nupic.encoders.MultiEncoder (multi.py): a data 'fieldname', an encoder
    # 'type', and the encoder-specific params (see scalar.py, date.py,
    # sdrcategory.py, etc. for types and their params).  In this example
    # 'Name1' is a timestamp, 'Name2' a scalar quantity, 'Name3' a category.
    'dsEncoderSchema': [
        dict(fieldname='Name1', type='DateEncoder', timeOfDay=(5, 5)),
        dict(fieldname='Name2', type='ScalarEncoder',
             name='Name2', minval=0, maxval=270, clipInput=True,
             n=70, w=5),
        dict(fieldname='Name3', type='SDRCategoryEncoder', name="Name3",
             n=DeferredDictLookup('claRegionNColumns'),
             w=DeferredDictLookup('spNumActivePerInhArea')),
    ],

    ############################################################################
    # General CLA region
    ############################################################################
    # Number of cortical columns (shared by SP and TM; see tpNCellsPerCol).
    # Replaces: spCoincCount
    'claRegionNColumns': 2048,

    ############################################################################
    # Spatial Pooler (SP) -- always enabled in the OPF
    ############################################################################
    # SP verbosity: 0 is silent; >=1 some info; >=2 more.
    'spVerbosity': 0,

    # Print/log SP stats every N iterations; 0 disables stats.
    'spPrintStatsPeriodIter': 0,

    # Absolute cap on simultaneously active SP columns; weaker columns
    # beyond this count are suppressed.
    'spNumActivePerInhArea': 40,

    # potentialPct: fraction of a column's receptive field available for
    # potential synapses at initialization
    # (potentialPct * (2*potentialRadius+1)^2).
    'spCoincInputPoolPct': 1.0,

    ############################################################################
    # Temporal Pooler (TM)
    ############################################################################
    # TM verbosity: 0 is silent; 1..6 increase detail (see verbosity in
    # backtracking_tm.py and backtracking_tm_cpp.py).
    'tpVerbosity': 0,

    # Print TM stats every N training iterations; 0 disables stats.
    'tpTrainPrintStatsPeriodIter': 0,

    # TM is required for temporal predictions, such as predicting the next
    # inputs; without it the model can only reconstruct missing sensor
    # inputs via the SP.
    'tmEnable': True,

    # Cells (states) allocated per column.
    'tpNCellsPerCol': 32,

    # Initial permanence of new synapses.
    'tpInitialPerm': 0.21,

    # Permanence increment on learning updates.
    'tpPermanenceInc': 0.1,

    # Permanence decrement; None defaults to the tpPermanenceInc value.
    'tpPermanenceDec': None,

    # Temporal pooler implementation selector (see _getTPClass in
    # CLARegion.py).
    'tpImplementation': 'cpp',

    # Maximum segments per cell: > 0 for fixed-size CLA, -1 otherwise.
    'tpMaxSegmentsPerCell': 128,

    # Connected synapses active due to infActiveState needed for a segment
    # to be active; None uses the default.  Replaces: tpActivationThreshold
    'tpSegmentActivationThreshold': None,

    # Minimum active synapses for a segment to be considered when searching
    # for the best-matching segments; None uses the default.
    # Replaces: tpMinThreshold
    'tpMinSegmentMatchSynapseThreshold': None,

    # Maximum synapses per segment: > 0 for fixed-size CLA, -1 otherwise.
    'tpMaxSynapsesPerSegment': 32,

    # Synapses added when a segment learns; if None, the
    # spNumActivePerInhArea value is used instead.
    'tpNewSynapseCount': 15,
}
# end of config dictionary
# Merge any sub-experiment overrides into the base config dictionary.
updateConfigFromSubConfig(config)
# Resolve ValueGetterBase-derived futures (e.g. DeferredDictLookup).
# NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# ------------------------------------------------------------------------------
# Tasks
#
# [optional] A sequence of one or more tasks describing what to do with the
# model.  Each task has a label, an input spec, an iteration count, and a
# task-control spec per opfTaskSchema.json.
#
# NOTE: tasks are consumed by OPF clients built on OPFTaskDriver; clients
# that drive OPFExperiment directly ignore this specification.
#
tasks = [
    {
        # Label used for diagnostic logging and for constructing
        # task-specific file or directory names.
        'taskLabel': "OnlineLearning",

        # Input stream spec per py/nupic/cluster/database/StreamDef.json.
        'dataset': {
            'info': 'test_NoProviders',
            'version': 1,
            'streams': [
                {
                    'columns': ['*'],
                    'info': 'my gym.csv dataset',
                    'source': 'file://extra/gym/gym.csv',
                    'first_record': 0,
                    'last_record': 4000,
                },
            ],
            'aggregation': config['aggregationInfo'],
        },

        # Maximum number of (possibly aggregated) records to process; the
        # task stops at this count or at the end of the dataset, whichever
        # comes first.  -1 means iterate over the entire dataset.
        'iterationCount': -1,

        # Task-control parameters for OPFTaskDriver
        # (per opfTaskControlSchema.json).
        'taskControl': {

            # Iteration cycle: a list of
            # opf_task_driver.IterationPhaseSpecXXXXX instances, e.g.
            # IterationPhaseSpecLearnOnly(1000) or
            # IterationPhaseSpecInferOnly(10).
            'iterationCycle': [
                IterationPhaseSpecLearnAndInfer(1000),
            ],

            # Inference specs: opf_task_driver.InferenceSpecXXXXX instances
            # naming which inferences to perform and which metrics to
            # gather at each inference step; what to do with the metrics
            # is up to the client.
            'inferences': [
                InferenceSpecNonTemporal(
                    # [optional] Fields whose values are withheld (replaced
                    # with missing-value sentinels) to test non-temporal
                    # reconstruction; the original values serve as ground
                    # truth for the metrics.  If omitted, all field values
                    # are fed to the model unaltered.
                    testFields=("consumption",),
                    # [optional] Metrics to gather for this inference type.
                    metrics=(
                        MetricSpec(metric='rmse', field="consumption"),
                    )
                ),
                InferenceSpecTemporal(
                    # [optional] Metrics to gather for this inference type.
                    metrics=(
                        MetricSpec(metric='rmse', field='consumption'),
                    ),
                ),
            ],

            # Regexes selecting which of the metric labels above MUST be
            # logged with every prediction (mirrors the optimization-metric
            # naming in permutations.py).
            'loggedMetrics': [],

            # Research/experimentation callbacks (optional); each callback
            # takes a reference to the OPFExperiment and returns nothing.
            # NOTE(review): the htmPredictionModel* callback names are not
            # imported in this template file -- presumably supplied via a
            # star-import in generated description.py files; confirm.
            'callbacks': {
                # Called at the start of the task, before model iterations.
                'setup': [htmPredictionModelControlEnableSPLearningCb,
                          htmPredictionModelControlEnableTPLearningCb],
                # Called after every learning/inference iteration.
                'postIter': [],
                # Called when the experiment task is finished.
                'finish': [],
            },
        },  # End of taskControl
    },  # End of task
]
# Description object consumed by the OPF experiment runner: bundles the
# model configuration and the task list into a single interface.
descriptionInterface = ExperimentDescriptionAPI(
    modelConfig=config, taskList=tasks)
| 17,842 | Python | .py | 399 | 40.007519 | 109 | 0.666743 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,831 | make_datasets.py | numenta_nupic-legacy/examples/opf/experiments/multistep/make_datasets.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets for the multi-step prediction experiments
"""
import os
import numpy
import random
from optparse import OptionParser
from nupic.data.file_record_stream import FileRecordStream
def _generateSimple(filename="simple.csv", numSequences=2, elementsPerSeq=1,
                    numRepeats=10, resets=False):
  """ Generate a simple dataset. This contains a bunch of non-overlapping
  sequences.

  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the
                  directory containing this script.
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  resets:         if True, turn on reset at start of each sequence
  """

  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  # Parenthesized form works as both the Py2 print statement and the Py3
  # print function.
  print("Creating %s..." % (pathname,))
  fields = [('reset', 'int', 'R'),
            ('field1', 'string', ''),
            ('field2', 'float', '')]
  outFile = FileRecordStream(pathname, write=True, fields=fields)

  # Close the stream even if generation fails part-way, so we never leak
  # the underlying file handle.
  try:
    # Create the sequences: sequence i covers the contiguous value range
    # [i * elementsPerSeq, (i + 1) * elementsPerSeq), so sequences never
    # overlap.
    sequences = [list(range(i * elementsPerSeq, (i + 1) * elementsPerSeq))
                 for i in range(numSequences)]

    # Write out the sequences in random order, repeating each sequence
    # numRepeats times.
    seqIdxs = []
    for _ in range(numRepeats):
      seqIdxs.extend(range(numSequences))
    random.shuffle(seqIdxs)

    for seqIdx in seqIdxs:
      # Only the first record of a sequence carries the reset flag (and
      # only when resets is enabled).
      reset = int(resets)
      for x in sequences[seqIdx]:
        outFile.appendRecord([reset, str(x), x])
        reset = 0
  finally:
    outFile.close()
def _generateOverlapping(filename="overlap.csv", numSequences=2, elementsPerSeq=3,
                         numRepeats=10, hub=(0, 1), hubOffset=1, resets=False):
  """ Generate a temporal dataset containing sequences that overlap one or more
  elements with other sequences.

  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the
                  directory containing this script.
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  hub:            sub-sequence to place within each other sequence.  The
                  default is an immutable tuple (previously a mutable list
                  default, which is a classic shared-state pitfall).
  hubOffset:      where, within each sequence, to place the hub
  resets:         if True, turn on reset at start of each sequence
  """

  # The hub must fit entirely within each sequence.  Explicit check rather
  # than assert: this validation must survive "python -O".
  if hubOffset + len(hub) > elementsPerSeq:
    raise ValueError("hub (offset %d, length %d) does not fit within "
                     "elementsPerSeq=%d" % (hubOffset, len(hub),
                                            elementsPerSeq))

  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  # Parenthesized form works as both the Py2 print statement and the Py3
  # print function.
  print("Creating %s..." % (pathname,))
  fields = [('reset', 'int', 'R'),
            ('field1', 'string', ''),
            ('field2', 'float', '')]
  outFile = FileRecordStream(pathname, write=True, fields=fields)

  # Close the stream even if generation fails part-way, so we never leak
  # the underlying file handle.
  try:
    # Build each sequence as: hubOffset unique elements, then the shared
    # hub sub-sequence, then unique elements up to elementsPerSeq.  Unique
    # elements come from a counter starting just above the largest hub
    # element, so they never collide with the hub values.
    sequences = []
    nextElemIdx = max(hub) + 1
    for _ in range(numSequences):
      seq = []
      for _ in range(hubOffset):
        seq.append(nextElemIdx)
        nextElemIdx += 1
      seq.extend(hub)
      while len(seq) < elementsPerSeq:
        seq.append(nextElemIdx)
        nextElemIdx += 1
      sequences.append(seq)

    # Write out the sequences in random order, repeating each sequence
    # numRepeats times.
    seqIdxs = []
    for _ in range(numRepeats):
      seqIdxs.extend(range(numSequences))
    random.shuffle(seqIdxs)

    for seqIdx in seqIdxs:
      # Only the first record of a sequence carries the reset flag (and
      # only when resets is enabled).
      reset = int(resets)
      for x in sequences[seqIdx]:
        outFile.appendRecord([reset, str(x), x])
        reset = 0
  finally:
    outFile.close()
def _generateFirstOrder0():
  """ Return the transition tables used to generate the 'first_order_0'
  dataset. Sequences always start at category 0 and then follow these first
  order transitions:

        .1    .75
    0----1-----2
     \    \
      \    \   .25
       \    \-----3
        \
         \ .9        .5
          \--- 4--------- 2
                \
                 \    .5
                  \---------3

  Parameters:
  ----------------------------------------------------------------------
  retval: (initProb, firstOrder, secondOrder, seqLen, categoryList)
    initProb:     Initial probability for each category, a vector of
                  length len(categoryList).
    firstOrder:   Dict mapping the 1st element of a sequence (as a string)
                  to the probability vector for the 2nd element.
    secondOrder:  Dict of the 2nd order probabilities keyed by the first
                  2 elements, or None when the sequences are first order
                  only (as they are for this model).
    seqLen:       Desired length of each generated sequence.
    categoryList: List of category names to use.
  """
  # Five categories named '0' .. '4'; every sequence begins at category 0.
  categoryList = [str(i) for i in range(5)]
  initProb = numpy.zeros(len(categoryList))
  initProb[0] = 1.0

  # First order transition probabilities, one probability row per current
  # category (rows match the diagram above).
  firstOrder = {
      '0': numpy.array([0, 0.1, 0, 0, 0.9]),
      '1': numpy.array([0, 0, 0.75, 0.25, 0]),
      '2': numpy.array([1.0, 0, 0, 0, 0]),
      '3': numpy.array([1.0, 0, 0, 0, 0]),
      '4': numpy.array([0, 0, 0.5, 0.5, 0]),
  }

  # This model is strictly first order, so no 2nd order table applies.
  secondOrder = None

  return (initProb, firstOrder, secondOrder, 3, categoryList)
def _generateFileFromProb(filename, numRecords, categoryList, initProb,
              firstOrderProb, secondOrderProb, seqLen, numNoise=0, resetsEvery=None):
  """ Generate a set of records reflecting a set of probabilities.

  Parameters:
  ----------------------------------------------------------------
  filename:        name of .csv file to generate
  numRecords:      number of records to generate
  categoryList:    list of category names
  initProb:        Initial probability for each category. This is a vector
                   of length len(categoryList).
  firstOrderProb:  A dictionary of the 1st order probabilities. The key
                   is the 1st element of the sequence, the value is
                   the probability of each 2nd element given the first.
  secondOrderProb: A dictionary of the 2nd order probabilities. The key
                   is the first 2 elements of the sequence, the value is
                   the probability of each possible 3rd element given the
                   first two. If this is None, then the sequences will be
                   first order only.
  seqLen:          Desired length of each sequence. The 1st element will
                   be generated using the initProb, the 2nd element by the
                   firstOrder table, and the 3rd and all successive
                   elements by the secondOrder table. None means infinite
                   length.
  numNoise:        Number of noise elements to place between each
                   sequence. The noise elements are evenly distributed from
                   all categories.
  resetsEvery:     If not None, generate a reset every N records

  Here is an example of some parameters:

  categoryList:    ['cat1', 'cat2', 'cat3']
  initProb:        [0.7, 0.2, 0.1]
  firstOrderProb:  {'[0]': [0.3, 0.3, 0.4],
                    '[1]': [0.3, 0.3, 0.4],
                    '[2]': [0.3, 0.3, 0.4]}
  secondOrderProb: {'[0,0]': [0.3, 0.3, 0.4],
                    '[0,1]': [0.3, 0.3, 0.4],
                    '[0,2]': [0.3, 0.3, 0.4],
                    '[1,0]': [0.3, 0.3, 0.4],
                    '[1,1]': [0.3, 0.3, 0.4],
                    '[1,2]': [0.3, 0.3, 0.4],
                    '[2,0]': [0.3, 0.3, 0.4],
                    '[2,1]': [0.3, 0.3, 0.4],
                    '[2,2]': [0.3, 0.3, 0.4]}
  """

  # Create the output file with the standard reset/field1/field2 layout
  print "Creating %s..." % (filename)
  fields = [('reset', 'int', 'R'),
            ('field1', 'string', ''),
            ('field2', 'float', '')]
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  outFile = FileRecordStream(pathname, write=True, fields=fields)

  # --------------------------------------------------------------------
  # Convert the probability tables into cumulative probabilities so that a
  # uniform random draw in [0, 1) can be mapped to a category with
  # numpy.searchsorted() below.
  initCumProb = initProb.cumsum()

  firstOrderCumProb = dict()
  for (key,value) in firstOrderProb.iteritems():
    firstOrderCumProb[key] = value.cumsum()

  if secondOrderProb is not None:
    secondOrderCumProb = dict()
    for (key,value) in secondOrderProb.iteritems():
      secondOrderCumProb[key] = value.cumsum()
  else:
    secondOrderCumProb = None

  # --------------------------------------------------------------------
  # Write out the sequences
  elementsInSeq = []          # elements emitted so far in the current sequence
  numElementsSinceReset = 0   # counts records since the last reset flag
  maxCatIdx = len(categoryList) - 1
  for _ in xrange(numRecords):
    # Generate a reset on the first record after a (re-)start
    if numElementsSinceReset == 0:
      reset = 1
    else:
      reset = 0

    # Pick the next element, based on how far we are into the current
    # sequence.
    rand = numpy.random.rand()

    # Generate 1st order sequences
    if secondOrderCumProb is None:
      if len(elementsInSeq) == 0:
        # First element: draw from the initial distribution
        catIdx = numpy.searchsorted(initCumProb, rand)
      elif len(elementsInSeq) >= 1 and \
              (seqLen is None or len(elementsInSeq) < seqLen-numNoise):
        # Subsequent elements: condition on the previous element only.
        # NOTE: keys here are plain category strings like '0' (matches
        # _generateFirstOrder0's table), unlike the '[0]' style keys used
        # in the 2nd order branch below.
        catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq[-1])],
                                    rand)
      else:   # random "noise" between sequences
        catIdx = numpy.random.randint(len(categoryList))

    # Generate 2nd order sequences
    else:
      if len(elementsInSeq) == 0:
        catIdx = numpy.searchsorted(initCumProb, rand)
      elif len(elementsInSeq) == 1:
        # str() of a one-element list produces keys like '[0]'
        catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)
      elif (len(elementsInSeq) >=2) and \
              (seqLen is None or len(elementsInSeq) < seqLen-numNoise):
        # Condition on the last two elements; keys look like '[0, 1]'
        catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-2:])], rand)
      else:   # random "noise" between sequences
        catIdx = numpy.random.randint(len(categoryList))

    # -------------------------------------------------------------------
    # Write out the record. Clamp the index because searchsorted can return
    # len(categoryList) when rand lands at/above the last cumulative bound.
    catIdx = min(maxCatIdx, catIdx)
    outFile.appendRecord([reset, categoryList[catIdx], catIdx])

    # ------------------------------------------------------------
    # Increment counters
    elementsInSeq.append(catIdx)
    numElementsSinceReset += 1

    # Generate another reset?
    if resetsEvery is not None and numElementsSinceReset == resetsEvery:
      numElementsSinceReset = 0
      elementsInSeq = []

    # Start another sequence once this one (plus its noise) is complete?
    if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):
      elementsInSeq = []

  outFile.close()
if __name__ == '__main__':

  helpString = \
  """%prog [options]
  Generate artificial datasets for testing multi-step prediction """

  # ============================================================================
  # Process command line arguments
  parser = OptionParser(helpString)
  parser.add_option("--verbosity", default=0, type="int",
        help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")

  (options, args) = parser.parse_args()
  if len(args) != 0:
    parser.error("No arguments accepted")

  # Set random seed so the generated datasets are reproducible across runs
  random.seed(42)

  # Create the dataset directory if necessary; all files below go into it
  datasetsDir = os.path.join(os.path.dirname(__file__), 'datasets')
  if not os.path.exists(datasetsDir):
    os.mkdir(datasetsDir)

  # Generate the sample datasets of repeated simple/overlapping sequences
  _generateSimple('simple_0.csv', numSequences=2, elementsPerSeq=5,
                  numRepeats=30)
  _generateSimple('simple_1.csv', numSequences=10, elementsPerSeq=5,
                  numRepeats=20)
  _generateOverlapping('simple_2.csv', numSequences=10, elementsPerSeq=5,
                  numRepeats=20, hub=[0,1], hubOffset=1, resets=False)
  _generateSimple('simple_3.csv', numSequences=2, elementsPerSeq=10,
                  numRepeats=30, resets=False)

  # The first order 0 dataset: sample records from the hand-built first
  # order transition tables
  (initProb, firstOrderProb, secondOrderProb, seqLen, categoryList) = \
                  _generateFirstOrder0()
  _generateFileFromProb(filename='first_order_0.csv', numRecords=1000,
                  categoryList=categoryList, initProb=initProb,
                  firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
                  seqLen=seqLen, numNoise=0, resetsEvery=None)
| 15,357 | Python | .py | 336 | 36.988095 | 86 | 0.579929 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,832 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_enc/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration. These settings override the matching
# entries of the base hotgym experiment description imported below: both
# the spatial pooler and temporal memory are disabled ('spEnable' and
# 'tmEnable' False) and inference runs as nontemporal multi-step.
config = \
{ 'modelParams': { 'clParams': { 'verbosity': 0},
                   'inferenceType': 'NontemporalMultiStep',
                   'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
                                                                    'fieldname': u'consumption',
                                                                    'n': 28,
                                                                    'name': u'consumption',
                                                                    'type': 'AdaptiveScalarEncoder',
                                                                    'w': 21},
                                                   # date-derived encodings of the timestamp field
                                                   'timestamp_dayOfWeek': { 'dayOfWeek': ( 21, 1),
                                                                            'fieldname': u'timestamp',
                                                                            'name': u'timestamp_dayOfWeek',
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                                            'name': u'timestamp_timeOfDay',
                                                                            'timeOfDay': ( 21, 1),
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_weekend': { 'fieldname': u'timestamp',
                                                                          'name': u'timestamp_weekend',
                                                                          'type': 'DateEncoder',
                                                                          'weekend': 21}},
                                     'verbosity': 0},
                   'spEnable': False,
                   'spParams': { },
                   'tmEnable': False,
                   'tmParams': { 'activationThreshold': 14,
                                 'minThreshold': 12,
                                 'verbosity': 0}}}

# Merge the overrides into the base description and re-export its symbols
# so the OPF sees this module as a complete experiment description.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 3,637 | Python | .py | 56 | 35.839286 | 107 | 0.388873 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,833 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_3_enc/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: point the base experiment at the
# simple_3 dataset, predict steps 1 and 3, and disable both the spatial
# pooler and temporal memory ('spEnable'/'tmEnable' False) for
# nontemporal multi-step inference.
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_3.csv'),
  'modelParams': { 'clParams': { 'verbosity': 1, 'steps': '1,3'},
                   'inferenceType': 'NontemporalMultiStep',
                   'sensorParams': { 'encoders': { }, 'verbosity': 1},
                   'spEnable': False,
                   'spParams': { },
                   'tmEnable': False,
                   'tmParams': { }},
  'predictionSteps': [1, 3]}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,783 | Python | .py | 38 | 41.578947 | 78 | 0.620908 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,834 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_1_f2/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: run the base experiment on the
# simple_1 dataset and predict the 'field2' column; all model parameter
# overrides are left empty (base defaults apply).
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_1.csv'),
  'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmParams': { }},
  'predictedField': 'field2'}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,618 | Python | .py | 35 | 42.142857 | 78 | 0.638379 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,835 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_sp_5step_16K/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration. Overrides for the base hotgym
# experiment imported below: nontemporal multi-step inference, a 16000
# record run, and tuned encoder/TM parameters (weekend encoder disabled
# via None).
config = \
{ 'modelParams': { 'clParams': { 'verbosity': 0},
                   'inferenceType': 'NontemporalMultiStep',
                   'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
                                                                    'fieldname': u'consumption',
                                                                    'n': 28,
                                                                    'name': u'consumption',
                                                                    'type': 'AdaptiveScalarEncoder',
                                                                    'w': 21},
                                                   'timestamp_dayOfWeek': { 'dayOfWeek': ( 21, 3),
                                                                            'fieldname': u'timestamp',
                                                                            'name': u'timestamp_dayOfWeek',
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                                            'name': u'timestamp_timeOfDay',
                                                                            'timeOfDay': ( 21, 1),
                                                                            'type': 'DateEncoder'},
                                                   # None removes this encoder from the base config
                                                   'timestamp_weekend': None},
                                     'verbosity': 0},
                   'spParams': { },
                   'tmParams': { 'activationThreshold': 13,
                                 'minThreshold': 9,
                                 'verbosity': 0}},
  'numRecords': 16000}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 3,269 | Python | .py | 52 | 36.557692 | 107 | 0.41052 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,836 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_0_f2/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: run the base experiment on the
# simple_0 dataset and predict the 'field2' column; all model parameter
# overrides are left empty (base defaults apply).
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_0.csv'),
  'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmParams': { }},
  'predictedField': 'field2'}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,618 | Python | .py | 35 | 42.142857 | 78 | 0.638379 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,837 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_1/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: run the base experiment on the
# simple_1 dataset with no model parameter overrides (base defaults apply).
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_1.csv'),
  'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmParams': { }}}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,588 | Python | .py | 34 | 42.588235 | 78 | 0.637419 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,838 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_sp/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration. Overrides for the base hotgym
# experiment imported below: nontemporal multi-step inference with only
# the consumption and time-of-day encoders (dayOfWeek/weekend disabled
# via None).
config = \
{ 'modelParams': { 'clParams': { 'verbosity': 0},
                   'inferenceType': 'NontemporalMultiStep',
                   'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
                                                                    'fieldname': u'consumption',
                                                                    'n': 28,
                                                                    'name': u'consumption',
                                                                    'type': 'AdaptiveScalarEncoder',
                                                                    'w': 21},
                                                   # None removes these encoders from the base config
                                                   'timestamp_dayOfWeek': None,
                                                   'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                                            'name': u'timestamp_timeOfDay',
                                                                            'timeOfDay': ( 21, 8),
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_weekend': None},
                                     'verbosity': 0},
                   'spParams': { 'spVerbosity': 0},
                   'tmParams': { 'activationThreshold': 14,
                                 'minThreshold': 12,
                                 'verbosity': 1}}}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 2,842 | Python | .py | 47 | 38.297872 | 107 | 0.448585 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,839 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_sp_16K/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration. Same overrides as the hotgym best-SP
# variant (nontemporal multi-step, consumption + time-of-day encoders
# only) but run over 16000 records.
config = \
{ 'modelParams': { 'clParams': { 'verbosity': 0},
                   'inferenceType': 'NontemporalMultiStep',
                   'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
                                                                    'fieldname': u'consumption',
                                                                    'n': 28,
                                                                    'name': u'consumption',
                                                                    'type': 'AdaptiveScalarEncoder',
                                                                    'w': 21},
                                                   # None removes these encoders from the base config
                                                   'timestamp_dayOfWeek': None,
                                                   'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                                            'name': u'timestamp_timeOfDay',
                                                                            'timeOfDay': ( 21, 8),
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_weekend': None},
                                     'verbosity': 0},
                   'spParams': { 'spVerbosity': 0},
                   'tmParams': { 'activationThreshold': 14,
                                 'minThreshold': 12,
                                 'verbosity': 1}},
  'numRecords': 16000}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 2,865 | Python | .py | 48 | 37.916667 | 107 | 0.450409 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,840 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_2/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: run the base experiment on the
# simple_2 dataset with no model parameter overrides (base defaults apply).
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_2.csv'),
  'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmParams': { }}}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,588 | Python | .py | 34 | 42.588235 | 78 | 0.637419 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,841 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_tp/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration. Overrides for the base hotgym
# experiment imported below: consumption + time-of-day encoders only
# (dayOfWeek/weekend disabled via None) and tuned TM thresholds. No
# inferenceType override here, so the base experiment's setting applies.
config = \
{ 'modelParams': { 'clParams': { 'verbosity': 0},
                   'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
                                                                    'fieldname': u'consumption',
                                                                    'n': 28,
                                                                    'name': u'consumption',
                                                                    'type': 'AdaptiveScalarEncoder',
                                                                    'w': 21},
                                                   # None removes these encoders from the base config
                                                   'timestamp_dayOfWeek': None,
                                                   'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                                            'name': u'timestamp_timeOfDay',
                                                                            'timeOfDay': ( 21, 8),
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_weekend': None},
                                     'verbosity': 0},
                   'spParams': { },
                   'tmParams': { 'activationThreshold': 14,
                                 'minThreshold': 12,
                                 'verbosity': 0}}}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 2,766 | Python | .py | 46 | 37.913043 | 107 | 0.444404 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,842 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_3_f2/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: run the base experiment on the
# simple_3 dataset, predict the 'field2' column at steps 1 and 3.
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_3.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0, 'steps': '1,3'},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmParams': { }},
  'predictedField': 'field2',
  'predictionSteps': [1, 3]}

# Merge the overrides into the base description and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,677 | Python | .py | 36 | 42.527778 | 78 | 0.63653 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,843 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_3/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides merged into the shared base description
# (../base/description.py): point the experiment at the simple_3 dataset
# and request 1- and 3-step-ahead predictions.
config = {
    'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                           '../datasets/simple_3.csv'),
    'modelParams': {
        # The classifier 'steps' CSV string matches 'predictionSteps' below.
        'clParams': {'verbosity': 0, 'steps': '1,3'},
        'sensorParams': {'encoders': {}, 'verbosity': 0},
        'spParams': {},
        'tmParams': {},
    },
    'predictionSteps': [1, 3],
}
# Merge the overrides in ``config`` into the shared base experiment
# description, then re-export every symbol the resulting module defines;
# the OPF framework reads the experiment interface from this module.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,647 | Python | .py | 35 | 42.971429 | 78 | 0.635572 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,844 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides merged into the shared base description
# (../base/description.py): only the dataset differs from the base; all
# model-parameter sections are left at their base values.
config = {
    'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                           '../datasets/simple_0.csv'),
    'modelParams': {
        'clParams': {},
        'sensorParams': {'encoders': {}, 'verbosity': 0},
        'spParams': {},
        'tmParams': {},
    },
}
# Merge the overrides in ``config`` into the shared base experiment
# description, then re-export every symbol the resulting module defines;
# the OPF framework reads the experiment interface from this module.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,588 | Python | .py | 34 | 42.588235 | 78 | 0.637419 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,845 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_sp_5step/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides merged into ../hotgym/description.py: switch to
# non-temporal multi-step inference, encode 'consumption' with an adaptive
# scalar encoder, enable dayOfWeek/timeOfDay date encodings, and set the TM
# activation/min thresholds.
config = {
    'modelParams': {
        'clParams': {'verbosity': 0},
        'inferenceType': 'NontemporalMultiStep',
        'sensorParams': {
            'encoders': {
                'consumption': {
                    'clipInput': True,
                    'fieldname': u'consumption',
                    'n': 28,
                    'name': u'consumption',
                    'type': 'AdaptiveScalarEncoder',
                    'w': 21,
                },
                'timestamp_dayOfWeek': {
                    'dayOfWeek': (21, 3),
                    'fieldname': u'timestamp',
                    'name': u'timestamp_dayOfWeek',
                    'type': 'DateEncoder',
                },
                'timestamp_timeOfDay': {
                    'fieldname': u'timestamp',
                    'name': u'timestamp_timeOfDay',
                    'timeOfDay': (21, 1),
                    'type': 'DateEncoder',
                },
                # Explicit None — presumably disables the base experiment's
                # weekend encoder; confirm against ../hotgym/description.py.
                'timestamp_weekend': None,
            },
            'verbosity': 0,
        },
        'spParams': {},
        'tmParams': {
            'activationThreshold': 13,
            'minThreshold': 9,
            'verbosity': 0,
        },
    },
}
# Merge the overrides in ``config`` into the shared hotgym experiment
# description, then re-export every symbol the resulting module defines;
# the OPF framework reads the experiment interface from this module.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 3,246 | Python | .py | 51 | 36.882353 | 107 | 0.408649 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,846 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_tp_16K/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides merged into ../hotgym/description.py: adaptive
# scalar encoding for 'consumption', timeOfDay-only date encoding (dayOfWeek
# and weekend encoders explicitly disabled with None), TM thresholds set, and
# the run limited to 16000 records.
config = {
    'modelParams': {
        'clParams': {'verbosity': 0},
        'sensorParams': {
            'encoders': {
                'consumption': {
                    'clipInput': True,
                    'fieldname': u'consumption',
                    'n': 28,
                    'name': u'consumption',
                    'type': 'AdaptiveScalarEncoder',
                    'w': 21,
                },
                'timestamp_dayOfWeek': None,
                'timestamp_timeOfDay': {
                    'fieldname': u'timestamp',
                    'name': u'timestamp_timeOfDay',
                    'timeOfDay': (21, 8),
                    'type': 'DateEncoder',
                },
                'timestamp_weekend': None,
            },
            'verbosity': 0,
        },
        'spParams': {},
        'tmParams': {
            'activationThreshold': 14,
            'minThreshold': 12,
            'verbosity': 0,
        },
    },
    'numRecords': 16000,
}
# Merge the overrides in ``config`` into the shared hotgym experiment
# description, then re-export every symbol the resulting module defines;
# the OPF framework reads the experiment interface from this module.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 2,789 | Python | .py | 47 | 37.531915 | 107 | 0.446311 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,847 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/base/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Model configuration dictionary.
#
# Full HTMPrediction model parameter set for the multistep base experiment.
# Sub-experiments override selected entries via updateConfigFromSubConfig();
# any ValueGetterBase-derived futures in the values are resolved later by
# applyValueGettersToContainer().
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",
    # Version that specifies the format of the config.
    'version': 1,
    # Record aggregation period (all-zero fields = no aggregation). Also
    # referenced from the control section.
    'aggregationInfo': {
        'days': 0,
        'fields': [],
        'hours': 0,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0,
    },
    # If not None, the code below config derives the classifier step count
    # from this and aggregationInfo.
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform.
        'inferenceType': 'TemporalMultiStep',
        'sensorParams': {
            # Sensor region verbosity: 0 = silent; higher values print what
            # is sensed at each step (see RecordSensor.compute()).
            'verbosity': 0,
            # Per-field encoder specifications.
            'encoders': {
                'field1': {
                    'fieldname': u'field1',
                    'n': 100,
                    'name': u'field1',
                    'type': 'SDRCategoryEncoder',
                    'w': 21,
                },
                'field2': {
                    'clipInput': True,
                    'fieldname': u'field2',
                    'maxval': 50,
                    'minval': 0,
                    'n': 500,
                    'name': u'field2',
                    'type': 'ScalarEncoder',
                    'w': 21,
                },
            },
            # Period for automatically-generated resets from the
            # RecordSensor; None (or all-zero values) disables them.
            'sensorAutoReset': None,
        },
        'spEnable': True,
        'spParams': {
            # SP verbosity: 0 = silent; >=1 some info; >=2 more info.
            'spVerbosity': 0,
            'spatialImp': 'cpp',
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same count is
            # used for SP and TM).
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control: maximum number of active columns in the
            # SP output (weaker ones are suppressed).
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # Fraction of a column's receptive field available for potential
            # synapses at initialization time.
            'potentialPct': 0.5,
            # Permanence threshold above which a synapse is "connected" and
            # can contribute to the cell's firing.
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
            # Boosting strength (>= 0.0); 0.0 disables boosting.
            'boostStrength': 0.0,
        },
        # TM is required for temporal (next-input) predictions; without it
        # the model can only reconstruct missing sensor inputs via the SP.
        'tmEnable': True,
        'tmParams': {
            # TM verbosity: 0 = silent; 1..6 = increasing detail.
            'verbosity': 0,
            # Number of cell columns (same count as the SP above).
            'columnCount': 2048,
            # Cells (states) allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal memory implementation selector.
            'temporalImp': 'cpp',
            # New-synapse formation count.
            'newSynapseCount': 16,
            # Max synapses per segment (> 0 fixed-size, -1 non-fixed-size).
            'maxSynapsesPerSegment': 32,
            # Max segments per cell (> 0 fixed-size, -1 non-fixed-size).
            'maxSegmentsPerCell': 128,
            # Initial synapse permanence.
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            'permanenceDec': 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum active synapses for a segment to be considered when
            # searching for the best-matching segments.
            'minThreshold': 12,
            # Connected active synapses required for a segment to activate.
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length: how many new elements the TM
            # appends to the end of a learned sequence at a time.
            'pamLength': 1,
        },
        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',
            'regionName': 'SDRClassifierRegion',
            # Classifier verbosity: 0 = silent; 1..6 = increasing detail.
            'verbosity': 0,
            # Learning/forgetting rate; higher adapts (and forgets) faster.
            'alpha': 0.25,
            # Placeholder; rewritten below from config['predictionSteps']
            # after updateConfigFromSubConfig() runs.
            'steps': '1',
        },
        'trainSPNetOnlyIfRequested': False,
    },
    # Step-ahead horizons to predict; mirrored into clParams['steps'] below.
    'predictionSteps': [1],
    # Field the metrics score: 'field1' (category) or 'field2' (scalar).
    'predictedField': 'field1',
    # Placeholder; a sub-experiment must supply the real dataset URL.
    'dataSource': 'fillInBySubExperiment',
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
# NOTE(review): the 'steps' value assigned in this branch is unconditionally
# overwritten by the assignment that follows (derived from
# config['predictionSteps']), so in effect this branch only validates that
# predictAheadTime divides into at least one aggregation period.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
     config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Fill in classifier steps
# The classifier region takes the step-ahead horizons as a comma-separated
# string, e.g. [1, 3] -> '1,3'.
config['modelParams']['clParams']['steps'] = '%s' % \
        (','.join([str(x) for x in config['predictionSteps']]))
# If the predicted field is field1 (category), use avg_err else if field 2
# (scalar) use aae as the metric
if config['predictedField'] == 'field1':
  metricName = 'avg_err'
  loggedMetrics = ['.*avg_err.*']
else:
  metricName = 'aae'
  loggedMetrics = ['.*aae.*']
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
  # The environment that the current model is being run in
  "environment": 'nupic',
  # Input stream specification per py/nupic/cluster/database/StreamDef.json.
  #
  'dataset' : {
    'info': 'multistep',
    'streams': [ {
      'columns': ['*'],
      'info': 'multi-step',
      # Supplied by the sub-experiment (see 'dataSource' in config above).
      'source': config['dataSource'],
    }],
    'version': 1},
  # Iteration count: maximum number of iterations. Each iteration corresponds
  # to one record from the (possibly aggregated) dataset. The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,
  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{'predictedField': config['predictedField'],
                   'predictionSteps': config['predictionSteps']},
  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    # Model's own one-step prediction, scored with the field-appropriate
    # error metric chosen above (avg_err for category, aae for scalar).
    MetricSpec(field=config['predictedField'], metric=metricName,
               inferenceElement='prediction', params={'window': 200}),
    # Baseline 'trivial' predictor scored with the same error metric, for
    # comparison against the model's predictions.
    MetricSpec(field=config['predictedField'], metric='trivial',
               inferenceElement='prediction', params={'errorMetric': metricName,
                                                      'window': 200}),
  ],
  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regex's correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': loggedMetrics,
}
# Add multi-step prediction metrics
# One 'multiStep' metric per requested horizon, scored with the same error
# metric over a 200-record window.
for steps in config['predictionSteps']:
  control['metrics'].append(
    MetricSpec(field=config['predictedField'], metric='multiStep',
               inferenceElement='multiStepBestPredictions',
               params={'errorMetric': metricName, 'window': 200,
                       'steps': steps}))
# The object the OPF framework loads from this description module.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,963 | Python | .py | 344 | 37.19186 | 108 | 0.62886 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,848 | permutations_simple_3.py | numenta_nupic-legacy/examples/opf/experiments/multistep/base/permutations_simple_3.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os
from pkg_resources import resource_filename
from nupic.swarming.permutation_helpers import *

# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
predictedField = 'field2'

# Path of the input dataset, resolved relative to the installed nupic package.
# BUGFIX(review): in the generated file this assignment sat *inside* the
# ``permutations`` dict literal below (a SyntaxError), and the import above
# read "resources_filename" instead of "resource_filename".
relativePath = os.path.join("examples", "opf", "experiments", "multistep",
                            "datasets", "simple_3.csv")

permutations = {
  # Non-permuted experiment settings passed through to the model: predict
  # the scalar field2 at 1- and 3-step-ahead horizons.
  'predictedField': 'field2',
  'predictionSteps': [1, 3],
  'dataSource': 'file://%s' % (resource_filename("nupic", relativePath)),

  # Encoder permutation choices for the predicted field. n is fixed to a
  # single choice here; widen with e.g. n=PermuteInt(13, 500, 20) to search.
  '__field2_encoder': PermuteEncoder(fieldName='field2',
                                     clipInput=True, minval=0, maxval=50,
                                     encoderClass='ScalarEncoder',
                                     w=21, n=PermuteChoices([500])),
}

# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
report = [
  '.*field2.*',
]

# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expression by RunPermutations.py's
# report generator
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2"
def permutationFilter(perm):
  """Decide whether a candidate permutation should be evaluated.

  RunPermutations calls this once for every possible combination of values
  drawn from the ``permutations`` dict.  Return True to keep the combination
  and False to discard it.

  Parameters:
  ---------------------------------------------------------
  perm: dict of one possible combination of name:value
        pairs chosen from permutations.
  """
  # This experiment does not prune the search space.  To filter, inspect
  # ``perm`` and return False for unwanted settings, e.g.:
  #   if perm['__consumption_encoder']['maxval'] > 300:
  #     return False
  return True
| 4,041 | Python | .py | 92 | 40.869565 | 97 | 0.691233 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,849 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/simple_3_sp/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides merged into the shared base description
# (../base/description.py): same simple_3 dataset and 1/3-step horizons,
# but with the non-temporal multi-step inference type.
config = {
    'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                           '../datasets/simple_3.csv'),
    'modelParams': {
        # The classifier 'steps' CSV string matches 'predictionSteps' below.
        'clParams': {'verbosity': 0, 'steps': '1,3'},
        'inferenceType': 'NontemporalMultiStep',
        'sensorParams': {'encoders': {}, 'verbosity': 0},
        'spParams': {},
        'tmParams': {},
    },
    'predictionSteps': [1, 3],
}
# Merge the overrides in ``config`` into the shared base experiment
# description, then re-export every symbol the resulting module defines;
# the OPF framework reads the experiment interface from this module.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,707 | Python | .py | 36 | 42.888889 | 78 | 0.632873 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,850 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym_best_tp_5step/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides merged into ../hotgym/description.py: adaptive
# scalar encoding for 'consumption', dayOfWeek/timeOfDay date encodings
# enabled (weekend encoder set to None), and TM activation/min thresholds
# set.  Inference type is inherited from the hotgym base description.
config = {
    'modelParams': {
        'clParams': {'verbosity': 0},
        'sensorParams': {
            'encoders': {
                'consumption': {
                    'clipInput': True,
                    'fieldname': u'consumption',
                    'n': 28,
                    'name': u'consumption',
                    'type': 'AdaptiveScalarEncoder',
                    'w': 21,
                },
                'timestamp_dayOfWeek': {
                    'dayOfWeek': (21, 3),
                    'fieldname': u'timestamp',
                    'name': u'timestamp_dayOfWeek',
                    'type': 'DateEncoder',
                },
                'timestamp_timeOfDay': {
                    'fieldname': u'timestamp',
                    'name': u'timestamp_timeOfDay',
                    'timeOfDay': (21, 1),
                    'type': 'DateEncoder',
                },
                'timestamp_weekend': None,
            },
            'verbosity': 0,
        },
        'spParams': {},
        'tmParams': {
            'activationThreshold': 13,
            'minThreshold': 9,
            'verbosity': 0,
        },
    },
}
# Merge the overrides in ``config`` into the shared hotgym experiment
# description, then re-export every symbol the resulting module defines;
# the OPF framework reads the experiment interface from this module.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| 3,186 | Python | .py | 50 | 36.82 | 107 | 0.405811 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,851 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Model configuration for the hotgym multistep prediction experiment.
# Consumed by updateConfigFromSubConfig() below and referenced from the
# control section; sub-experiments may override individual keys.
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    # Records are aggregated into 1-hour buckets, summing 'consumption'.
    'aggregationInfo': { 'days': 0,
                         'fields': [('consumption', 'sum')],
                         'hours': 1,
                         'microseconds': 0,
                         'milliseconds': 0,
                         'minutes': 0,
                         'months': 0,
                         'seconds': 0,
                         'weeks': 0,
                         'years': 0},

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,

            # Example:
            #     dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #     ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': { 'consumption': { 'clipInput': True,
                                           'fieldname': u'consumption',
                                           'n': 100,
                                           'name': u'consumption',
                                           'type': 'AdaptiveScalarEncoder',
                                           'w': 21},
                          'timestamp_dayOfWeek': { 'dayOfWeek': (21, 1),
                                                   'fieldname': u'timestamp',
                                                   'name': u'timestamp_dayOfWeek',
                                                   'type': 'DateEncoder'},
                          'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                   'name': u'timestamp_timeOfDay',
                                                   'timeOfDay': (21, 1),
                                                   'type': 'DateEncoder'},
                          'timestamp_weekend': { 'fieldname': u'timestamp',
                                                 'name': u'timestamp_weekend',
                                                 'type': 'DateEncoder',
                                                 'weekend': 21}},

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,

            # Spatial Pooler implementation selector, see getSPClass
            # in py/regions/SPRegion.py for details
            # 'py' (default), 'cpp' (speed optimized, new)
            'spatialImp' : 'cpp',

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,

            # boostStrength controls the strength of boosting. It should be a
            # a number greater or equal than 0.0. No boosting is applied if
            # boostStrength=0.0. Boosting encourages efficient usage of columns.
            'boostStrength': 10.0,
        },

        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,

        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',

            'regionName' : 'SDRClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.0001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1,5',
        },

        'trainSPNetOnlyIfRequested': False,
    },

    # Predict 1 and 5 aggregated steps ahead; also read by the control section.
    'predictionSteps': [1, 5],
    'predictedField': 'consumption',
    'numRecords': 4000,
}
# end of config dictionary

# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
    # Convert the requested look-ahead time into a whole number of
    # aggregation periods; must come out to at least one step.
    predictionSteps = int(round(aggregationDivide(
        config['predictAheadTime'], config['aggregationInfo'])))
    assert (predictionSteps >= 1)
    config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# Experiment control section: data source, iteration count, inference
# arguments and metric specifications for the OPF runner.
control = {
    # The environment that the current model is being run in
    "environment": 'nupic',

    # Input stream specification per py/nupic/cluster/database/StreamDef.json.
    #
    'dataset' : {
        u'info': u'test_hotgym',
        u'streams': [ { u'columns': [u'*'],
                        u'info': u'hotGym.csv',
                        u'last_record': config['numRecords'],
                        u'source': u'file://extra/hotgym/hotgym.csv'}],
        # Aggregate the raw stream with the same spec the model was built for.
        'aggregation': config['aggregationInfo'],
        u'version': 1},

    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount' : -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs":{'predictedField': config['predictedField'],
                     'predictionSteps': config['predictionSteps']},

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics':[],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regex's correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*aae.*'],
}
# Add multi-step prediction metrics.  For every requested prediction horizon
# we register four metrics: the model's multi-step prediction and a trivial
# (last-value) baseline, each scored with both AAE and altMAPE over a
# 1000-record window.
for steps in config['predictionSteps']:
  for errorMetric in ('aae', 'altMAPE'):
    for metricName, inferenceElement in (
        ('multiStep', 'multiStepBestPredictions'),
        ('trivial', 'prediction')):
      control['metrics'].append(
          MetricSpec(field=config['predictedField'],
                     metric=metricName,
                     inferenceElement=inferenceElement,
                     params={'errorMetric': errorMetric,
                             'window': 1000,
                             'steps': steps}))
# Hand the assembled model config and control sections to the OPF framework.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 16,783 | Python | .py | 346 | 38.265896 | 108 | 0.618064 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,852 | permutations_sp.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym/permutations_sp.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.swarming.permutation_helpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'consumption'

# Common encoder width shared by every permuted encoder below.
ENC_WIDTH = 21

permutations = {
  # Spatial-pooler-only swarm: temporal memory is disabled.
  'inferenceType': 'NontemporalMultiStep',
  'tmEnable': False,

  # Encoder permutation choices
  # Example:
  #
  #   '__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7,
  #       n=100),
  #
  #   '__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder',
  #       w=7, n=100),
  #
  #   '__timestamp_timeOfDay_encoder' : PermuteEncoder('timestamp',
  #       'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])),
  #
  #   '__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp',
  #       'DateEncoder.dayOfWeek', w=7, radius=PermuteChoices([1, 3])),
  #
  #   '__consumption_encoder' : PermuteEncoder('consumption', 'ScalarEncoder',
  #       w=7, n=PermuteInt(13, 500, 20), minval=0,
  #       maxval=PermuteInt(100, 300, 25)),
  #
  # (generated from PERM_ENCODER_CHOICES)
  '__timestamp_timeOfDay_encoder' : PermuteEncoder(fieldName='timestamp',
      encoderClass='DateEncoder.timeOfDay',
      w=ENC_WIDTH, radius=PermuteFloat(0.5, 12)),
  '__timestamp_dayOfWeek_encoder' : PermuteEncoder(fieldName='timestamp',
      encoderClass='DateEncoder.dayOfWeek', w=ENC_WIDTH,
      radius=PermuteFloat(1, 6)),
  '__timestamp_weekend_encoder' : PermuteEncoder(fieldName='timestamp',
      encoderClass='DateEncoder.weekend', w=ENC_WIDTH,
      radius=PermuteChoices([1])),
  '__consumption_encoder' : PermuteEncoder(fieldName='consumption',
      encoderClass='AdaptiveScalarEncoder', w=ENC_WIDTH,
      n=PermuteInt(28, 521), clipInput=True),

  # Fixed (non-permuted) temporal parameters.
  'tpSegmentActivationThreshold': 14,
  'tpMinSegmentMatchSynapseThreshold': 12,
}

# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
#       report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
  '.*consumption.*',
]

# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
#       report generator
# (generated from minimize = 'prediction:aae:window=1000:field=consumption')
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=1000:field=consumption"
def permutationFilter(perm):
  """Accept or reject one candidate permutation.

  RunPermutations calls this once for every possible combination of values
  drawn from the ``permutations`` dict; returning True keeps the candidate,
  False discards it.  This experiment applies no filtering, so every
  combination is accepted.

  Parameters:
  ---------------------------------------------------------
  perm: dict of one possible combination of name:value
        pairs chosen from permutations.
  """
  # Example of a real filter:
  # if perm['__consumption_encoder']['maxval'] > 300:
  #   return False
  return True
| 4,426 | Python | .py | 98 | 41.744898 | 103 | 0.69898 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,853 | permutations.py | numenta_nupic-legacy/examples/opf/experiments/multistep/hotgym/permutations.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.swarming.permutation_helpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'consumption'

# Search space explored by the swarm: encoder radii/sizes, inference type
# and a handful of temporal-memory parameters.
permutations = {
  'modelParams': {
    'inferenceType': PermuteChoices(['NontemporalMultiStep', 'TemporalMultiStep']),
    'sensorParams': {
      'encoders': {
        'timestamp_dayOfWeek': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteFloat(1.000000, 6.000000), w=21),
        'timestamp_timeOfDay': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteFloat(0.500000, 12.000000), w=21),
        'consumption': PermuteEncoder(fieldName='consumption', encoderClass='AdaptiveScalarEncoder', n=PermuteInt(28, 521), w=21, clipInput=True),
        'timestamp_weekend': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.weekend', radius=PermuteChoices([1]), w=21),
      },
    },
    'tmParams': {
      'minThreshold': PermuteInt(9, 12),
      'activationThreshold': PermuteInt(12, 16),
      'pamLength': PermuteInt(1, 5),
    },
  }
}

# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
#       report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
  '.*consumption.*',
]

# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
#       report generator
# (generated from minimize = 'prediction:aae:window=1000:field=consumption')
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=1000:field=consumption"
def permutationFilter(perm):
  """Decide whether a candidate permutation is allowed.

  Invoked by RunPermutations for each combination of values chosen from the
  ``permutations`` dict.  A True return value admits the combination to the
  search; False rejects it.  No combinations are rejected here.

  Parameters:
  ---------------------------------------------------------
  perm: dict of one possible combination of name:value
        pairs chosen from permutations.
  """
  # To reject candidates, test perm's entries, e.g.:
  # if perm['__consumption_encoder']['maxval'] > 300:
  #   return False
  return True
| 3,704 | Python | .py | 77 | 45.012987 | 155 | 0.717494 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,854 | description.py | numenta_nupic-legacy/examples/opf/experiments/multistep/first_order_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# the sub-experiment configuration
# Overrides applied on top of ../base/description.py: the first_order_0
# dataset and 1/2/3-step-ahead prediction.
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/first_order_0.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0, 'steps': '1,2,3'},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmParams': { }},
  'predictionSteps': [1, 2, 3]}

# Merge this sub-experiment config into the base experiment and re-export
# everything the base description defines at module level.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,657 | Python | .py | 35 | 43.257143 | 78 | 0.635352 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
import random
import multiprocessing
import numpy as np
from nupic.frameworks.opf import helpers
from nupic.frameworks.opf.client import Client
from random import shuffle
from random import randrange, uniform
import copy
# Number of most recent records used when scoring each model's AAE.
windowSize=36
# Ensemble refresh period: every r records the worst models are culled.
r=30
# Field of the input stream that the ensemble predicts.
predictedField='pounds'
# Particle-swarm update coefficients (velocity retention / social pull).
inertia=0.25
socRate=1.0
class Worker(multiprocessing.Process):
  """Worker process owning one shard of the model ensemble.

  Each Worker drives a set of OPF models (one nupic Client per model),
  advances them record by record, and services commands received on
  work_queue ('predict', 'getPredictionStreams', 'delete', 'getAAEs',
  'addPSOVariants'), posting replies on result_queue.
  """

  def __init__(self, work_queue, result_queue, stableSize, windowSize, predictedField, modeldata, iden):
    multiprocessing.Process.__init__(self)

    # job management
    self.work_queue = work_queue
    self.result_queue = result_queue
    self.kill_received = False

    #Model State
    self.stableSize=stableSize
    self.windowSize=windowSize
    self.stableUpdateStepSize=1
    # Worker id; used to route 'addPSOVariants' entries to the right process.
    self.iden=iden
    # One truth value appended per processed record (shared by all models).
    self.truth=[]
    self.predictedField=predictedField
    self.modeldata=modeldata
    self.numModels=len(modeldata)
    self.M={}                   # model name -> model state dict
    self.Scores={}              # model name -> AAE over the trailing window
    self.predictionStreams={}   # model name -> predictions (seeded with 0)
    self.median=True
    self.index=-1               # index of most recently processed record
    # NOTE(review): reads the module-global 'modelData' (capital D), not the
    # 'modeldata' parameter; this only works because __main__ binds modelData
    # before the process is spawned. Presumably self.modeldata was intended.
    self.modelCapacity=len(modelData)

  def run(self):
    """Job loop: pull (command, aux) tuples off work_queue until killed."""
    # NOTE(review): again uses the global 'modelData' instead of
    # self.modeldata -- confirm before refactoring.
    self.initM(modelData)
    while not self.kill_received:
      jobaux = self.work_queue.get()
      command=jobaux[0]
      if command=='predict':
        # Advance every model one record, then report a list of
        # (score, latest prediction, truth, model name) tuples.
        self.index=self.index+1
        self.updateModelStats()
        self.result_queue.put([(self.Scores[m], self.predictionStreams[m][-1], self.truth[self.index], m) for m in self.M.keys()])
      if command=='getPredictionStreams':
        # Prediction streams minus the trailing window
        # (uses the module-global windowSize, not self.windowSize).
        self.result_queue.put(dict([(m, self.predictionStreams[m][:-windowSize]) for m in self.predictionStreams.keys()]))
      if command=='delete':
        # Drop every model named in the aux payload that this worker owns.
        delList=jobaux[1]
        for d in delList:
          if(d in self.M):
            del self.M[d]
            del self.Scores[d]
            del self.predictionStreams[d]
            print 'deleted Model'+str(d)+" in process "+str(self.iden)
        print "number of models remaining in "+str(self.iden)+": "+str(len(self.M))
        # Ack with this worker's id so the master knows deletion finished.
        self.result_queue.put(self.iden)
      if command=='getAAEs':
        # Report (name, AAE over last r records, (x, v) PSO state,
        # model description) for every model owned by this worker.
        self.result_queue.put([(m, computeAAE(self.truth, self.predictionStreams[m],r ), self.getModelState(self.M[m]), self.M[m]['modelDescription']) for m in self.M.keys()])
      if command=='addPSOVariants':
        for t in jobaux[1]:
          # Each aux entry is (target worker id, (description, x, v), name);
          # only adopt variants addressed to this worker.
          if(t[0]==self.iden):
            name=t[2]
            modelDescription=t[1][0]
            x=t[1][1]
            v=t[1][2]
            self.M[name]={}
            self.M[name]['modelDescription']=modelDescription
            self.M[name]['client']=Client(**modelDescription)
            self.M[name]['alive']=True
            self.M[name]['start']=0
            self.M[name]['end']=None
            self.M[name]['x']=x
            self.M[name]['v']=v
            # New models start with a sentinel (bad) score and a seed
            # prediction so stream indexing lines up with older models.
            self.Scores[name]=10000
            self.predictionStreams[name]=[0,]
            print "added new model "+str(name)+" to process"+str(self.iden)
    # store the result

  def getModelState(self, d):
    """Return the PSO (position, velocity) pair from a model state dict."""
    return d['x'], d['v']

  def initM(self, modelDatList):
    """Populate self.M from a list of (name, modelDescription) tuples.

    For each model a nupic Client is created, the PSO position vector x is
    read off the model's parameters (alpha, n, synPermInactiveDec,
    activationThreshold, pamLength) and a random initial velocity v is drawn.
    """
    for modelData in modelDatList:
      name=modelData[0]
      self.M[name]={}
      self.M[name]['modelDescription']=modelData[1]
      self.M[name]['client']=Client(**modelData[1])
      alpha=modelData[1]['modelConfig']['modelParams']['clParams']['alpha']
      # n of the predicted-field encoder (0 if that encoder is absent).
      n=0
      for encoder in modelData[1]['modelConfig']['modelParams']['sensorParams']['encoders']:
        if encoder['name']==predictedField:
          n=encoder['n']
      synPermInactiveDec=modelData[1]['modelConfig']['modelParams']['spParams']['synPermInactiveDec']
      activationThreshold=modelData[1]['modelConfig']['modelParams']['tmParams']['activationThreshold']
      pamLength=modelData[1]['modelConfig']['modelParams']['tmParams']['pamLength']
      self.M[name]['x']=np.array([alpha, n,synPermInactiveDec,activationThreshold, pamLength ])
      # Random initial PSO velocity, one component per parameter above.
      vAlpha=uniform(0.01, 0.15)
      vN=randrange(30, 200, 5)
      vSynPermInactiveDec=uniform(0.01, 0.15)
      vActivationThreshold=randrange(12, 17, 1)
      vPamLength=randrange(1, 6, 1)
      self.M[name]['v']=np.array([vAlpha, vN,vSynPermInactiveDec,vActivationThreshold,vPamLength])
      self.M[name]['alive']=True
      self.M[name]['start']=0
      self.M[name]['end']=None
      self.Scores[name]=10000
      self.predictionStreams[name]=[0,]

  def updateModelStats(self):
    """Advance every model one record; refresh truth, streams and scores."""
    # The truth value is identical across models, so append it only once.
    updatedTruth=False
    for m in self.M.keys():
      truth, prediction=self.M[m]['client'].nextTruthPrediction(self.predictedField)
      if(not updatedTruth):
        self.truth.append(truth)
        updatedTruth=True
      self.predictionStreams[m].append(prediction)
      self.Scores[m]=computeAAE(self.truth, self.predictionStreams[m],windowSize)
def getStableVote(scores, stableSize, votes, currModel):
  """Combine per-model results into a single ensemble prediction.

  scores     -- list of (windowed AAE, latest prediction, truth, model name)
  stableSize -- number of best-scoring models allowed to vote
  votes      -- dict model name -> cumulative vote count (mutated in place)
  currModel  -- name of the model whose vote was followed last step

  Returns (prediction, currModel).
  """
  # Keep only the stableSize models with the lowest windowed error.
  scores = sorted(scores, key=lambda t: t[0])[:stableSize]
  # NOTE(review): 'median' is hard-coded True, so the sticky-model branch
  # below is dead code; only the median-vote branch ever executes.
  median=True
  if not median:
    # Sticky strategy: keep following currModel while it remains in the
    # top set; otherwise switch to the best-scoring model.
    for s in scores:
      if s[3]==currModel:
        print [(score[0], score[3]) for score in scores]
        return s[1], currModel
    print [(s[0], s[3]) for s in scores], "switching voting Model!"
    return scores[0][1], scores[0][3]
  else:
    # Median strategy: return the median prediction of the top set and
    # credit one vote to every participating model.
    print [(s[0], s[3]) for s in scores]
    voters = sorted(scores, key=lambda t: t[1])
    for voter in voters:
      votes[voter[3]]=votes[voter[3]]+1
    vote=voters[int(stableSize/2)][1]
    return vote, currModel
def getFieldPermutations(config, predictedField):
  """Enumerate candidate encoder groupings for the predicted field.

  From the config's encoder list, build one group containing just the
  predicted field's encoder, followed by one two-encoder group pairing it
  with each other (non-predicted, non-None) encoder, in list order.

  Returns a list of encoder-dict lists.
  """
  allEncoders = config['modelParams']['sensorParams']['encoders']
  groupings = []
  for predEncoder in allEncoders:
    if predEncoder is None or predEncoder['name'] != predictedField:
      continue
    # The predicted field on its own...
    groupings.append([predEncoder])
    # ...and paired with every other field's encoder.
    groupings.extend([predEncoder, other] for other in allEncoders
                     if other is not None and other['name'] != predictedField)
  return groupings
def getModelDescriptionLists(numProcesses, experiment):
  """Build the full grid of model descriptions and split it across workers.

  Loads the base OPF experiment at 'experiment', then takes the cross
  product of encoder groupings and the parameter grids below, producing one
  (name, modelDescription) tuple per combination.  The shuffled list is
  split into numProcesses roughly equal chunks (one per Worker).
  """
  config, control = helpers.loadExperiment(experiment)
  encodersList=getFieldPermutations(config, 'pounds')
  # Parameter grids; the step sizes are chosen so most grids currently
  # contain only one or two values.
  ns=range(50, 140, 120)
  clAlphas=np.arange(0.01, 0.16, 0.104)
  synPermInactives=np.arange(0.01, 0.16, 0.105)
  tpPamLengths=range(5, 8, 2)
  tpSegmentActivations=range(13, 17, 12)
  # Locate the dataset URI; its layout differs between environments.
  if control['environment'] == 'opfExperiment':
    experimentTasks = control['tasks']
    task = experimentTasks[0]
    datasetURI = task['dataset']['streams'][0]['source']
  elif control['environment'] == 'nupic':
    datasetURI = control['dataset']['streams'][0]['source']
  metricSpecs = control['metrics']
  datasetPath = datasetURI[len("file://"):]
  ModelSetUpData=[]
  name=0
  for n in ns:
    for clAlpha in clAlphas:
      for synPermInactive in synPermInactives:
        for tpPamLength in tpPamLengths:
          for tpSegmentActivation in tpSegmentActivations:
            for encoders in encodersList:
              # Deep-copy so each model owns an independent config.
              encodersmod=copy.deepcopy(encoders)
              configmod=copy.deepcopy(config)
              configmod['modelParams']['sensorParams']['encoders']=encodersmod
              configmod['modelParams']['clParams']['alpha']=clAlpha
              configmod['modelParams']['spParams']['synPermInactiveDec']=synPermInactive
              configmod['modelParams']['tmParams']['pamLength']=tpPamLength
              configmod['modelParams']['tmParams']['activationThreshold']=tpSegmentActivation
              # Apply the bucket count n to the predicted field's encoder.
              for encoder in encodersmod:
                if encoder['name']==predictedField:
                  encoder['n']=n
              ModelSetUpData.append((name,{'modelConfig':configmod, 'inferenceArgs':control['inferenceArgs'], 'metricSpecs':metricSpecs, 'sourceSpec':datasetPath,'sinkSpec':None,}))
              name=name+1
              #print modelInfo['modelConfig']['modelParams']['tmParams']
              #print modelInfo['modelConfig']['modelParams']['sensorParams']['encoders'][4]['n']
  print "num Models"+str( len(ModelSetUpData))
  # Shuffle so each worker gets a random mix of parameter combinations.
  shuffle(ModelSetUpData)
  #print [ (m[1]['modelConfig']['modelParams']['tmParams']['pamLength'], m[1]['modelConfig']['modelParams']['sensorParams']['encoders']) for m in ModelSetUpData]
  return list(chunk(ModelSetUpData,numProcesses))
def chunk(l, n):
  """ Yield n successive chunks from l.

  The first n-1 chunks each hold round(len(l)/n) items; the final chunk
  holds whatever remains (it may be shorter or longer, and some chunks may
  be empty when n > len(l)).  Requires n >= 1.

  Fix: use range() instead of the Python-2-only xrange() (identical
  behavior on Python 2, NameError-free on Python 3) and spell the last
  slice start as (n-1)*newn, which equals the original n*newn-newn.
  """
  newn = int(1.0 * len(l) / n + 0.5)
  for i in range(n - 1):
    yield l[i*newn:i*newn+newn]
  yield l[(n-1)*newn:]
def command(command, work_queues, aux):
  """Broadcast a (command, aux) job tuple to every worker queue."""
  job = (command, aux)
  for q in work_queues:
    q.put(job)
def getDuplicateList(streams, delta):
  """Find models whose prediction streams are near-duplicates.

  streams -- dict model name -> list of predictions (mutated: duplicates
             are removed as they are found)
  delta   -- squared-distance threshold below which two streams count as
             duplicates

  Returns the list of model names judged redundant (one survivor is kept
  per duplicate group).

  NOTE(review): this deletes from 'streams' while iterating streams.keys();
  safe on Python 2 where keys() returns a list copy, but would raise
  RuntimeError on Python 3's live view.
  """
  delList=[]
  keys=streams.keys()
  for key1 in keys:
    # key1 may already have been removed as someone else's duplicate.
    if key1 in streams:
      for key2 in streams.keys():
        if(key1 !=key2):
          print 'comparing model'+str(key1)+" to "+str(key2)
          # Squared Euclidean distance between the two streams
          # (zip truncates to the shorter stream).
          dist=sum([(a-b)**2 for a, b in zip(streams[key1], streams[key2])])
          print dist
          if(dist<delta):
            delList.append(key2)
            del streams[key2]
  return delList
def slice_sampler(px, N = 1, x = None):
  """
  Provides samples from a user-defined distribution.

  slice_sampler(px, N = 1, x = None)

  Inputs:
  px = A discrete probability distribution.
  N  = Number of samples to return, default is 1
  x  = Optional list/array of observation values to return, where prob(x) = px.

  Outputs:
  If x=None (default) or if len(x) != len(px), it will return an array of
  integers between 0 and len(px)-1. If x is supplied, it will return the
  samples from x according to the distribution px.
  """
  # NOTE(review): np.int was removed in NumPy 1.24 -- this would need
  # dtype=int under a modern NumPy; likewise xrange/print are Python 2 only.
  values = np.zeros(N, dtype=np.int)
  samples = np.arange(len(px))
  # Normalize px so it sums to 1.
  px = np.array(px) / (1.*sum(px))
  # Slice sampling: draw a height u, sample uniformly among the bins whose
  # probability exceeds u, then redraw u under the chosen bin.
  u = uniform(0, max(px))
  for n in xrange(N):
    included = px>=u
    choice = random.sample(range(np.sum(included)), 1)[0]
    values[n] = samples[included][choice]
    u = uniform(0, px[included][choice])
  if x:
    if len(x) == len(px):
      # Map sampled indices onto the supplied observation values.
      x=np.array(x)
      values = x[values]
    else:
      print "px and x are different lengths. Returning index locations for px."
  return values
def getPSOVariants(modelInfos, votes, n):
  """Spawn n particle-swarm variants of vote-favored models.

  modelInfos -- list of (name, AAE, (x, v), modelDescription) tuples,
                sorted best-first (modelInfos[0] is the AAE leader)
  votes      -- dict model name -> vote count; sampling weight for parents
  n          -- number of variants to create

  Returns a list of (modelDescription, x, v) tuples for the new variants.
  """
  # get x, px lists for sampling
  norm=sum(votes.values())
  xpx =[(m, float(votes[m])/norm) for m in votes.keys()]
  x,px = [[z[i] for z in xpx] for i in (0,1)]
  #sample form set of models
  variantIDs=slice_sampler(px, n, x)
  print "variant IDS"
  print variantIDs
  #best X: position vector of the current best-scoring model
  x_best=modelInfos[0][2][0]
  # create PSO variates of models
  modelDescriptions=[]
  for variantID in variantIDs:
    # Look up the sampled parent's (name, AAE, (x, v), description) entry.
    t=modelInfos[[i for i, v in enumerate(modelInfos) if v[0] == variantID][0]]
    x=t[2][0]
    v=t[2][1]
    print "old x"
    print x
    modelDescriptionMod=copy.deepcopy(t[3])
    configmod=modelDescriptionMod['modelConfig']
    # PSO velocity update: damped old velocity plus a random pull toward
    # the global best position (uses module globals inertia and socRate).
    v=inertia*v+socRate*np.random.random_sample(len(v))*(x_best-x)
    x=x+v
    print "new x"
    print x
    # Write the new position back into the model parameters, clamped to
    # each parameter's valid range.
    configmod['modelParams']['clParams']['alpha']=max(0.01, x[0])
    configmod['modelParams']['spParams']['synPermInactiveDec']=max(0.01, x[2])
    configmod['modelParams']['tmParams']['pamLength']=int(round(max(1, x[4])))
    configmod['modelParams']['tmParams']['activationThreshold']=int(round(max(1, x[3])))
    for encoder in configmod['modelParams']['sensorParams']['encoders']:
      if encoder['name']==predictedField:
        # n must stay above the encoder width w.
        encoder['n']=int(round(max(encoder['w']+1, x[1]) ))
    modelDescriptions.append((modelDescriptionMod, x, v))
  return modelDescriptions
def computeAAE(truth, predictions, windowSize):
windowSize=min(windowSize, len(truth))
zipped=zip(truth[-windowSize:], predictions[-windowSize-1:])
AAE=sum([abs(a - b) for a, b in zipped])/windowSize
return AAE
if __name__ == "__main__":
  # Ensemble driver: spawns one Worker process per model group, streams
  # records through every model, combines the per-model predictions with a
  # stable-voting scheme, and periodically culls the worst models and
  # replaces them with PSO variants of the best ones.
  #
  # NOTE(review): `windowSize` and `r` (the refresh interval) are read below
  # but not assigned in this section -- presumably module-level constants
  # defined earlier in the file; confirm.
  cutPercentage=0.1          # fraction of models culled at each refresh
  currModel=0                # id of the model currently holding the stable vote
  stableSize=3
  delta=1
  predictedField='pounds'
  truth=[]                   # actual values observed so far
  ensemblePredictions=[0,]   # seeded with a dummy prediction for record 0, so it
                             # stays one element longer than `truth` (computeAAE
                             # relies on this offset)
  divisor=4
  ModelSetUpData=getModelDescriptionLists(divisor, './')
  num_processes=len(ModelSetUpData)
  print num_processes
  work_queues=[]
  votes={}
  votingParameterStats={"tpSegmentActivationThreshold":[], "tpPamLength":[], "synPermInactiveDec":[], "clAlpha":[], "numBuckets":[]}
  # create a queue to pass to workers to store the results
  result_queue = multiprocessing.Queue(len(ModelSetUpData))
  # spawn workers -- one Worker process per model group, each with its own
  # command queue; all share the single result queue
  workerName=0
  modelNameCount=0
  for modelData in ModelSetUpData:
    print len(modelData)
    modelNameCount+=len(modelData)
    work_queue= multiprocessing.Queue()
    work_queues.append(work_queue)
    worker = Worker(work_queue, result_queue, stableSize, windowSize, predictedField, modelData, workerName)
    worker.start()
    workerName=workerName+1
  #init votes dict (data[0] is the model's unique id)
  for dataList in ModelSetUpData:
    for data in dataList:
      votes[data[0]]=0
  # main record loop: each iteration feeds one record to every worker and
  # collects one (modelId, prediction, actual) score per model
  for i in range(2120):
    command('predict', work_queues, i)
    scores=[]
    for j in range(num_processes):
      subscore=result_queue.get()
      scores.extend(subscore)
    print ""
    print i
    ensemblePrediction, currModel=getStableVote(scores, stableSize, votes, currModel)
    ensemblePredictions.append(ensemblePrediction)
    # all models saw the same record, so any score carries the actual value
    truth.append(scores[0][2])
    print computeAAE(truth,ensemblePredictions, windowSize), int(currModel)
    assert(result_queue.empty())
    if i%r==0 and i!=0: #refresh ensemble
      assert(result_queue.empty())
      #get AAES of models over last i records
      command('getAAEs', work_queues, None)
      AAEs=[]
      for j in range(num_processes):
        subAAEs=result_queue.get()
        AAEs.extend(subAAEs)
      # sort ascending by error; the tail holds the worst models
      AAEs=sorted(AAEs, key=lambda t: t[1])
      numToDelete=int(round(cutPercentage*len(AAEs)))
      print "Single Model AAES"
      print [(aae[0], aae[1]) for aae in AAEs]
      print "Ensemble AAE"
      print computeAAE(truth, ensemblePredictions, r)
      #add bottom models to delList
      print "Vote counts"
      print votes
      delList=[t[0] for t in AAEs[-numToDelete:]]
      print "delList"
      print delList
      #find duplicate models(now unnecessary)
      #command('getPredictionStreams', work_queues, None)
      #streams={}
      #for j in range(num_processes):
      #  subList=result_queue.get()
      #  streams.update(subList)
      #delList.extend(getDuplicateList(streams, delta))
      #print delList
      command('delete', work_queues, delList)
      for iden in delList:
        del votes[iden]
      print votes
      #wait for deletion to finish and collect processIndices for addition
      processIndices=[]
      for j in range(num_processes):
        processIndices.append( result_queue.get())
      # pick new set of models for PSO variants
      newModelDescriptions=getPSOVariants(AAEs, votes, len(delList))
      assert(result_queue.empty())
      #send new model dscriptions to queue and have processess pick them up
      # NOTE(review): this inner loop reuses `i`, shadowing the outer record
      # index. Harmless here (nothing reads `i` again before the next outer
      # iteration re-binds it), but the variable should be renamed.
      aux=[]
      for i in range(len(newModelDescriptions)):
        votes[modelNameCount]=0
        aux.append((processIndices[i],newModelDescriptions[i],modelNameCount) )
        modelNameCount=modelNameCount+1
      command('addPSOVariants', work_queues, aux)
      #set votes to 0 so the next voting window starts fresh
      for key in votes.keys():
        votes[key]=0
  print "AAE over full stream"
  print computeAAE(truth, ensemblePredictions, len(truth))
  print "AAE1000"
  print computeAAE(truth, ensemblePredictions, 1000)
| 15,348 | Python | .py | 377 | 33.233422 | 183 | 0.649185 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,856 | test_all.py | numenta_nupic-legacy/examples/opf/experiments/params/test_all.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Script for trying different model parameters for existing experiments."""
import os
from pprint import pprint
import time
from nupic.frameworks.opf import helpers
from nupic.frameworks.opf.client import Client
# Experiment directories relative to "trunk/examples/opf/experiments."
EXPERIMENTS_FILE = 'successful_experiments.txt'
def testAll(experiments):
experimentsDir = os.path.join(os.path.split(
os.path.dirname(__file__))[:-1])[0]
for experiment in experiments:
experimentBase = os.path.join(os.getcwd(), experimentsDir, experiment)
config, control = helpers.loadExperiment(experimentBase)
if control['environment'] == 'opfExperiment':
experimentTasks = control['tasks']
task = experimentTasks[0]
datasetURI = task['dataset']['streams'][0]['source']
elif control['environment'] == 'nupic':
datasetURI = control['dataset']['streams'][0]['source']
metricSpecs = control['metrics']
datasetPath = datasetURI[len("file://"):]
for i in xrange(1024, 2176, 128):
#config['modelParams']['tmParams']['cellsPerColumn'] = 16
config['modelParams']['tmParams']['columnCount'] = i
config['modelParams']['spParams']['columnCount'] = i
print 'Running with 32 cells per column and %i columns.' % i
start = time.time()
result = runOneExperiment(config, control['inferenceArgs'], metricSpecs,
datasetPath)
print 'Total time: %d.' % (time.time() - start)
pprint(result)
def runOneExperiment(modelConfig, inferenceArgs, metricSpecs, sourceSpec,
                     sinkSpec=None):
  """Run a single OPF experiment through a Client and return its metrics."""
  opfClient = Client(modelConfig, inferenceArgs, metricSpecs, sourceSpec,
                     sinkSpec)
  runResult = opfClient.run()
  return runResult.metrics
if __name__ == '__main__':
  # Because of the duration of some experiments, it is often better to do one
  # at a time. Each entry is an experiment directory relative to
  # "examples/opf/experiments".
  testAll(('anomaly/temporal/saw_big',))
| 2,873 | Python | .py | 60 | 43.75 | 80 | 0.685847 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,857 | makeDatasets.py | numenta_nupic-legacy/examples/opf/experiments/classification/makeDatasets.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import os
import random
from optparse import OptionParser
from nupic.data.file_record_stream import FileRecordStream
def _generateCategory(filename="simple.csv", numSequences=2, elementsPerSeq=1,
numRepeats=10, resets=False):
""" Generate a simple dataset. This contains a bunch of non-overlapping
sequences.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
resets: if True, turn on reset at start of each sequence
"""
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('reset', 'int', 'R'), ('category', 'int', 'C'),
('field1', 'string', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences
sequences = []
for i in range(numSequences):
seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for i in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
for seqIdx in seqIdxs:
reset = int(resets)
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([reset, str(seqIdx), str(x)])
reset = 0
outFile.close()
def _generateScalar(filename="simple.csv", numSequences=2, elementsPerSeq=1,
numRepeats=10, stepSize=0.1, resets=False):
""" Generate a simple dataset. This contains a bunch of non-overlapping
sequences of scalar values.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
stepSize: how far apart each scalar is
resets: if True, turn on reset at start of each sequence
"""
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('reset', 'int', 'R'), ('category', 'int', 'C'),
('field1', 'float', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences
sequences = []
for i in range(numSequences):
seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for i in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
for seqIdx in seqIdxs:
reset = int(resets)
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([reset, str(seqIdx), x*stepSize])
reset = 0
outFile.close()
def _generateOverlapping(filename="overlap.csv", numSequences=2, elementsPerSeq=3,
numRepeats=10, hub=[0,1], hubOffset=1, resets=False):
""" Generate a temporal dataset containing sequences that overlap one or more
elements with other sequences.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
hub: sub-sequence to place within each other sequence
hubOffset: where, within each sequence, to place the hub
resets: if True, turn on reset at start of each sequence
"""
# Check for conflicts in arguments
assert (hubOffset + len(hub) <= elementsPerSeq)
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('reset', 'int', 'R'), ('category', 'int', 'C'),
('field1', 'string', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences with the hub in the middle
sequences = []
nextElemIdx = max(hub)+1
for _ in range(numSequences):
seq = []
for j in range(hubOffset):
seq.append(nextElemIdx)
nextElemIdx += 1
for j in hub:
seq.append(j)
j = hubOffset + len(hub)
while j < elementsPerSeq:
seq.append(nextElemIdx)
nextElemIdx += 1
j += 1
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for _ in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
for seqIdx in seqIdxs:
reset = int(resets)
seq = sequences[seqIdx]
for (x) in seq:
outFile.appendRecord([reset, str(seqIdx), str(x)])
reset = 0
outFile.close()
if __name__ == '__main__':

  helpString = \
  """%prog [options] <datasetName>
  Generate artifical datasets for testing classification """

  # ============================================================================
  # Process command line arguments
  parser = OptionParser(helpString)
  parser.add_option("--verbosity", default=0, type="int",
        help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")

  (options, args) = parser.parse_args()
  if len(args) != 0:
    parser.error("No arguments accepted")

  # Set random seed so the generated datasets are reproducible
  random.seed(42)

  # Create the dataset directory if necessary
  datasetsDir = os.path.join(os.path.dirname(__file__), 'datasets')
  if not os.path.exists(datasetsDir):
    os.mkdir(datasetsDir)

  # Generate the category field datasets: the "_SP_" sets use 1-element
  # sequences, the "_TM_" sets use multi-element sequences, and the "_hub_"
  # set contains sequences sharing a common sub-sequence.
  _generateCategory('category_SP_0.csv', numSequences=2, elementsPerSeq=1,
                    numRepeats=20)
  _generateCategory('category_SP_1.csv', numSequences=50, elementsPerSeq=1,
                    numRepeats=20)
  _generateCategory('category_TM_0.csv', numSequences=2, elementsPerSeq=5,
                    numRepeats=30)
  _generateCategory('category_TM_1.csv', numSequences=10, elementsPerSeq=5,
                    numRepeats=20)
  _generateOverlapping('category_hub_TP_0.csv', numSequences=10, elementsPerSeq=5,
                       numRepeats=20, hub=[0,1], hubOffset=1, resets=False)

  # Generate the scalar field datasets
  _generateScalar('scalar_SP_0.csv', numSequences=2, elementsPerSeq=1,
                  numRepeats=20, stepSize=0.1, resets=False)
  _generateScalar('scalar_TM_0.csv', numSequences=2, elementsPerSeq=5,
                  numRepeats=20, stepSize=0.1, resets=False)
  _generateScalar('scalar_TM_1.csv', numSequences=10, elementsPerSeq=5,
                  numRepeats=20, stepSize=0.1, resets=False)
25,858 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/category_TM_1/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.

import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: only the values below override the base
# experiment; everything else is inherited from ../base_category/description.py.
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/category_TM_1.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { 'spVerbosity': 0},
                   'tmEnable': True,
                   'tmParams': { 'verbosity': 0}}}

# Merge `config` over the base description and expose the resulting
# experiment variables at this module's top level, where the OPF experiment
# runner expects to find them.
mod = importBaseDescription('../base_category/description.py', config)
locals().update(mod.__dict__)
| 1,683 | Python | .py | 35 | 43.514286 | 78 | 0.635036 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,859 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/category_hub_TP_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.

import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: only the values below override the base
# experiment; everything else is inherited from ../base_category/description.py.
config = \
{ 'claEvalClassification': True,
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/category_hub_TP_0.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmEnable': True,
                   'tmParams': { }}}

# Merge `config` over the base description and expose the resulting
# experiment variables at this module's top level, where the OPF experiment
# runner expects to find them.
mod = importBaseDescription('../base_category/description.py', config)
locals().update(mod.__dict__)
| 1,688 | Python | .py | 35 | 43.657143 | 78 | 0.63675 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,860 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/category_TM_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.

import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: only the values below override the base
# experiment; everything else is inherited from ../base_category/description.py.
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/category_TM_0.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmEnable': True,
                   'tmParams': { }}}

# Merge `config` over the base description and expose the resulting
# experiment variables at this module's top level, where the OPF experiment
# runner expects to find them.
mod = importBaseDescription('../base_category/description.py', config)
locals().update(mod.__dict__)
| 1,653 | Python | .py | 35 | 42.657143 | 78 | 0.633209 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,861 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/scalar_TP_1/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.

import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: only the values below override the base
# experiment; everything else is inherited from ../base_scalar/description.py.
config = \
{ 'claEvalClassification': True,
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/scalar_TM_1.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmEnable': True,
                   'tmParams': { }}}

# Merge `config` over the base description and expose the resulting
# experiment variables at this module's top level, where the OPF experiment
# runner expects to find them.
mod = importBaseDescription('../base_scalar/description.py', config)
locals().update(mod.__dict__)
| 1,680 | Python | .py | 35 | 43.428571 | 78 | 0.635588 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,862 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/scalar_encoder_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.

import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: only the values below override the base
# experiment (../base_scalar/description.py). This variant disables both the
# SP and the TM (spEnable/tmEnable False, NontemporalClassification) so the
# encoder output feeds the classifier directly, and replaces the base
# encoder with a narrow clipped AdaptiveScalarEncoder over [0, 0.1].
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/scalar_SP_0.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0},
                   'inferenceType': 'NontemporalClassification',
                   'sensorParams': { 'encoders': { 'field1': { 'clipInput': True,
                                                               'fieldname': u'field1',
                                                               'maxval': 0.10000000000000001,
                                                               'minval': 0.0,
                                                               'n': 11,
                                                               'name': u'field1',
                                                               'type': 'AdaptiveScalarEncoder',
                                                               'w': 7}},
                                     'verbosity': 0},
                   'spEnable': False,
                   'spParams': { 'spVerbosity': 0},
                   'tmEnable': False,
                   'tmParams': { }}}

# Merge `config` over the base description and expose the resulting
# experiment variables at this module's top level, where the OPF experiment
# runner expects to find them.
mod = importBaseDescription('../base_scalar/description.py', config)
locals().update(mod.__dict__)
| 2,416 | Python | .py | 45 | 38.444444 | 95 | 0.502746 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,863 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/scalar_TP_0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.

import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# The sub-experiment configuration: only the values below override the base
# experiment; everything else is inherited from ../base_scalar/description.py.
config = \
{ 'claEvalClassification': True,
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/scalar_TM_0.csv'),
  'modelParams': { 'clParams': { 'verbosity': 0},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmEnable': True,
                   'tmParams': { }}}

# Merge `config` over the base description and expose the resulting
# experiment variables at this module's top level, where the OPF experiment
# runner expects to find them.
mod = importBaseDescription('../base_scalar/description.py', config)
locals().update(mod.__dict__)
| 1,680 | Python | .py | 35 | 43.428571 | 78 | 0.635588 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,864 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/base_scalar/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    # (All fields are zero: no record aggregation is performed.)
    'aggregationInfo': { 'days': 0,
                         'fields': [],
                         'hours': 0,
                         'microseconds': 0,
                         'milliseconds': 0,
                         'minutes': 0,
                         'months': 0,
                         'seconds': 0,
                         'weeks': 0,
                         'years': 0},

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalClassification',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,

            # Example:
            #   dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': { 'field1': { 'clipInput': True,
                                      'fieldname': u'field1',
                                      'n': 438,
                                      'name': u'field1',
                                      'type': 'AdaptiveScalarEncoder',
                                      'w': 7,
                                      'forced': True}},

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,

            'spatialImp' : 'cpp',

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 1.0,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.05,

            'synPermInactiveDec': 0.008,

            # boostStrength controls the strength of boosting. It should be a
            # a number greater or equal than 0.0. No boosting is applied if
            # boostStrength=0.0. Boosting encourages efficient usage of columns.
            'boostStrength': 0.0,
        },

        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,

        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 15,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 14,

            'outputType': 'activeState',

            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        # Classifier configuration. Note this experiment uses the KNN
        # classifier region (regionName below) rather than an SDR classifier.
        'clParams': {
            'regionName' : 'KNNClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,

            'distanceMethod': 'pctOverlapOfProto',

            'cellsPerCol': 32,

            'k': 1,

            'outputProbabilitiesByDist': 1,

            'maxCategoryCount': 100,
        },

        'trainSPNetOnlyIfRequested': False,
    },

    'claTrainSPNetOnlyIfRequested': True,

    # Placeholder: each sub-experiment overrides this with the file:// URI of
    # its dataset (see the sibling description.py files).
    'dataSource': 'fillInBySubExperiment',
}
# end of config dictionary

# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# With no TM, there are no columns
if not config['modelParams']['tmEnable']:
config['modelParams']['clParams']['cellsPerCol'] = 0
control = {
# The environment that the current model is being run in
"environment": 'opfExperiment',
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPF Model directly do not make use of
# the tasks specification.
#
"tasks":[
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "OnlineLearning",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'test_NoProviders',
u'streams': [ {
u'columns': [u'*'],
u'info': u'simple.csv',
'source': config['dataSource'],
}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000),
#IterationPhaseSpecInferOnly(10),
],
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(metric='avg_err', inferenceElement='classification',
params={'window': 200}),
MetricSpec(metric='neg_auc', inferenceElement='classConfidences',
params={'window': 200, 'computeEvery': 10}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*avg_err.*', '.*auc.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPF Model>); returns nothing
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPF Model>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPF Model>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 16,382 | Python | .py | 351 | 37.535613 | 108 | 0.633041 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,865 | description.py | numenta_nupic-legacy/examples/opf/experiments/classification/base_category/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalClassification',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'field1': { 'fieldname': u'field1',
'n': 100,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 1.0,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.05,
'synPermInactiveDec': 0.008,
# boostStrength controls the strength of boosting. It should be a
# a number greater or equal than 0.0. No boosting is applied if
# boostStrength=0.0. Boosting encourages efficient usage of columns.
'boostStrength': 0.0,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 10,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 14,
'outputType': 'activeState',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'KNNClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
'distanceMethod': 'pctOverlapOfProto',
'cellsPerCol': 32,
'k': 1,
'outputProbabilitiesByDist': 1,
'maxCategoryCount': 100,
},
'trainSPNetOnlyIfRequested': False,
},
'claTrainSPNetOnlyIfRequested': True,
'dataSource': 'fillInBySubExperiment',
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# With no TM, there are no columns
if not config['modelParams']['tmEnable']:
config['modelParams']['clParams']['cellsPerCol'] = 0
control = {
# The environment that the current model is being run in
"environment": 'opfExperiment',
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPF Model directly do not make use of
# the tasks specification.
#
"tasks":[
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "OnlineLearning",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'test_NoProviders',
u'streams': [ {
u'columns': [u'*'],
u'info': u'simple.csv',
'source': config['dataSource'],
}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000),
#IterationPhaseSpecInferOnly(10),
],
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(metric='avg_err', inferenceElement='classification',
params={'window': 200}),
MetricSpec(metric='neg_auc', inferenceElement='classConfidences',
params={'window': 200, 'computeEvery': 10}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*avg_err.*', '.*auc.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPF Model>); returns nothing
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPF Model>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPF Model>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 16,306 | Python | .py | 349 | 37.65043 | 108 | 0.634417 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,866 | NuPIC Walkthrough-checkpoint.ipynb | numenta_nupic-legacy/examples/.ipynb_checkpoints/NuPIC Walkthrough-checkpoint.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Encoders\n",
"\n",
"* Scalar\n",
"* Date/time\n",
"* Category\n",
"* Multi"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"import numpy"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.encoders import ScalarEncoder\n",
"\n",
"ScalarEncoder?"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3 = [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n",
"4 = [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n",
"5 = [0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"# 22 bits with 3 active representing values 0 to 100\n",
"# clipInput=True makes values >100 encode the same as 100 (instead of throwing a ValueError)\n",
"# forced=True allows small values for `n` and `w`\n",
"enc = ScalarEncoder(n=22, w=3, minval=2.5, maxval=97.5, clipInput=True, forced=True)\n",
"print \"3 =\", enc.encode(3)\n",
"print \"4 =\", enc.encode(4)\n",
"print \"5 =\", enc.encode(5)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"100 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n",
"1000 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n"
]
}
],
"source": [
"# Encode maxval\n",
"print \"100 =\", enc.encode(100)\n",
"# See that any larger number gets the same encoding\n",
"print \"1000 =\", enc.encode(1000)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder\n",
"\n",
"RandomDistributedScalarEncoder?"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1]\n",
"4 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1]\n",
"5 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1]\n",
"\n",
"100 = [0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n",
"1000 = [0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0]\n"
]
}
],
"source": [
"# 21 bits with 3 active with buckets of size 5\n",
"rdse = RandomDistributedScalarEncoder(n=21, w=3, resolution=5, offset=2.5)\n",
"\n",
"print \"3 = \", rdse.encode(3)\n",
"print \"4 = \", rdse.encode(4)\n",
"print \"5 = \", rdse.encode(5)\n",
"print\n",
"print \"100 = \", rdse.encode(100)\n",
"print \"1000 =\", rdse.encode(1000)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"import datetime\n",
"from nupic.encoders.date import DateEncoder\n",
"\n",
"DateEncoder?"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"now = [0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0]\n",
"next month = [0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0]\n",
"xmas = [1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n"
]
}
],
"source": [
"de = DateEncoder(season=5)\n",
"\n",
"now = datetime.datetime.strptime(\"2014-05-02 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n",
"print \"now = \", de.encode(now)\n",
"nextMonth = datetime.datetime.strptime(\"2014-06-02 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n",
"print \"next month =\", de.encode(nextMonth)\n",
"xmas = datetime.datetime.strptime(\"2014-12-25 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n",
"print \"xmas = \", de.encode(xmas)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"cat = [0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n",
"dog = [0 0 0 0 0 0 1 1 1 0 0 0 0 0 0]\n",
"monkey = [0 0 0 0 0 0 0 0 0 1 1 1 0 0 0]\n",
"slow loris = [0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n"
]
}
],
"source": [
"from nupic.encoders.category import CategoryEncoder\n",
"\n",
"categories = (\"cat\", \"dog\", \"monkey\", \"slow loris\")\n",
"encoder = CategoryEncoder(w=3, categoryList=categories, forced=True)\n",
"cat = encoder.encode(\"cat\")\n",
"dog = encoder.encode(\"dog\")\n",
"monkey = encoder.encode(\"monkey\")\n",
"loris = encoder.encode(\"slow loris\")\n",
"print \"cat = \", cat\n",
"print \"dog = \", dog\n",
"print \"monkey = \", monkey\n",
"print \"slow loris =\", loris"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"print encoder.encode(None)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1 1 1 0 0 0 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"print encoder.encode(\"unknown\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"({'category': ([(1, 1)], 'cat')}, ['category'])\n"
]
}
],
"source": [
"print encoder.decode(cat)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"({'category': ([(1, 2)], 'cat, dog')}, ['category'])\n"
]
}
],
"source": [
"catdog = numpy.array([0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0])\n",
"print encoder.decode(catdog)"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Spatial Pooler"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.research.spatial_pooler import SpatialPooler\n",
"\n",
"print SpatialPooler?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"15\n",
"[0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"print len(cat)\n",
"print cat"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n",
"[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n",
"[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n",
"[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n"
]
}
],
"source": [
"sp = SpatialPooler(inputDimensions=(15,),\n",
" columnDimensions=(4,),\n",
" potentialRadius=15,\n",
" numActiveColumnsPerInhArea=1,\n",
" globalInhibition=True,\n",
" synPermActiveInc=0.03,\n",
" potentialPct=1.0)\n",
"import numpy\n",
"for column in xrange(4):\n",
" connected = numpy.zeros((15,), dtype=\"int\")\n",
" sp.getConnectedSynapses(column, connected)\n",
" print connected"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1 0 0 0]\n"
]
}
],
"source": [
"output = numpy.zeros((4,), dtype=\"int\")\n",
"sp.compute(cat, learn=True, activeArray=output)\n",
"print output"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"for _ in xrange(20):\n",
" sp.compute(cat, learn=True, activeArray=output)"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n",
"[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n",
"[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n",
"[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n"
]
}
],
"source": [
"for column in xrange(4):\n",
" connected = numpy.zeros((15,), dtype=\"int\")\n",
" sp.getConnectedSynapses(column, connected)\n",
" print connected"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"for _ in xrange(200):\n",
" sp.compute(cat, learn=True, activeArray=output)\n",
" sp.compute(dog, learn=True, activeArray=output)\n",
" sp.compute(monkey, learn=True, activeArray=output)\n",
" sp.compute(loris, learn=True, activeArray=output)"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n",
"[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n",
"[0 0 0 0 0 0 0 0 0 1 1 1 0 0 0]\n",
"[0 0 0 0 0 0 1 1 1 0 0 0 1 1 1]\n"
]
}
],
"source": [
"for column in xrange(4):\n",
" connected = numpy.zeros((15,), dtype=\"int\")\n",
" sp.getConnectedSynapses(column, connected)\n",
" print connected"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 0 1 1 0 1 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"noisyCat = numpy.zeros((15,), dtype=\"uint32\")\n",
"noisyCat[3] = 1\n",
"noisyCat[4] = 1\n",
"# This is part of dog!\n",
"noisyCat[6] = 1\n",
"print noisyCat"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 1 0 0]\n"
]
}
],
"source": [
"sp.compute(noisyCat, learn=False, activeArray=output)\n",
"print output # matches cat!"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Temporal Memory (a.k.a. Sequence Memory, Temporal Pooler)\n",
"\n",
"From: `examples/tm/hello_tm.py`"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.research.BacktrackingTM import BacktrackingTM\n",
"\n",
"BacktrackingTM?"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Step 1: create Temporal Pooler instance with appropriate parameters\n",
"tm = BacktrackingTM(numberOfCols=50, cellsPerColumn=2,\n",
" initialPerm=0.5, connectedPerm=0.5,\n",
" minThreshold=10, newSynapseCount=10,\n",
" permanenceInc=0.1, permanenceDec=0.0,\n",
" activationThreshold=8,\n",
" globalDecay=0, burnIn=1,\n",
" checkSynapseConsistency=False,\n",
" pamLength=10)"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Step 2: create input vectors to feed to the temporal memory. Each input vector\n",
"# must be numberOfCols wide. Here we create a simple sequence of 5 vectors\n",
"# representing the sequence A -> B -> C -> D -> E\n",
"x = numpy.zeros((5, tm.numberOfCols), dtype=\"uint32\")\n",
"x[0,0:10] = 1 # Input SDR representing \"A\", corresponding to columns 0-9\n",
"x[1,10:20] = 1 # Input SDR representing \"B\", corresponding to columns 10-19\n",
"x[2,20:30] = 1 # Input SDR representing \"C\", corresponding to columns 20-29\n",
"x[3,30:40] = 1 # Input SDR representing \"D\", corresponding to columns 30-39\n",
"x[4,40:50] = 1 # Input SDR representing \"E\", corresponding to columns 40-49"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Step 3: send this simple sequence to the temporal memory for learning\n",
"# We repeat the sequence 10 times\n",
"for i in range(10):\n",
"\n",
" # Send each letter in the sequence in order\n",
" for j in range(5):\n",
"\n",
" # The compute method performs one step of learning and/or inference. Note:\n",
" # here we just perform learning but you can perform prediction/inference and\n",
" # learning in the same step if you want (online learning).\n",
" tm.compute(x[j], enableLearn = True, computeInfOutput = False)\n",
"\n",
" # This function prints the segments associated with every cell.$$$$\n",
" # If you really want to understand the TP, uncomment this line. By following\n",
" # every step you can get an excellent understanding for exactly how the TP\n",
" # learns.\n",
" #tm.printCells()\n",
"\n",
" # The reset command tells the TM that a sequence just ended and essentially\n",
" # zeros out all the states. It is not strictly necessary but it's a bit\n",
" # messier without resets, and the TM learns quicker with resets.\n",
" tm.reset()"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"-------- A -----------\n",
"Raw input vector\n",
"1111111111 0000000000 0000000000 0000000000 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"1111111111 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 1111111111 0000000000 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[10 11 12 13 14 15 16 17 18 19] \n",
"\n",
"\n",
"-------- B -----------\n",
"Raw input vector\n",
"0000000000 1111111111 0000000000 0000000000 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 1111111111 0000000000 0000000000 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 1111111111 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[20 21 22 23 24 25 26 27 28 29] \n",
"\n",
"\n",
"-------- C -----------\n",
"Raw input vector\n",
"0000000000 0000000000 1111111111 0000000000 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 1111111111 0000000000 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 1111111111 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[30 31 32 33 34 35 36 37 38 39] \n",
"\n",
"\n",
"-------- D -----------\n",
"Raw input vector\n",
"0000000000 0000000000 0000000000 1111111111 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 1111111111 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 1111111111 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[40 41 42 43 44 45 46 47 48 49] \n",
"\n",
"\n",
"-------- E -----------\n",
"Raw input vector\n",
"0000000000 0000000000 0000000000 0000000000 1111111111 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 1111111111 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[] \n"
]
}
],
"source": [
"# Step 4: send the same sequence of vectors and look at predictions made by\n",
"# temporal memory\n",
"\n",
"# Utility routine for printing the input vector\n",
"def formatRow(x):\n",
" s = ''\n",
" for c in range(len(x)):\n",
" if c > 0 and c % 10 == 0:\n",
" s += ' '\n",
" s += str(x[c])\n",
" s += ' '\n",
" return s\n",
"\n",
"for j in range(5):\n",
" print \"\\n\\n--------\",\"ABCDE\"[j],\"-----------\"\n",
" print \"Raw input vector\\n\",formatRow(x[j])\n",
"\n",
" # Send each vector to the TP, with learning turned off\n",
" tm.compute(x[j], enableLearn=False, computeInfOutput=True)\n",
"\n",
" # This method prints out the active state of each cell followed by the\n",
" # predicted state of each cell. For convenience the cells are grouped\n",
" # 10 at a time. When there are multiple cells per column the printout\n",
" # is arranged so the cells in a column are stacked together\n",
" #\n",
" # What you should notice is that the columns where active state is 1\n",
" # represent the SDR for the current input pattern and the columns where\n",
" # predicted state is 1 represent the SDR for the next expected pattern\n",
" print \"\\nAll the active and predicted cells:\"\n",
" tm.printStates(printPrevious=False, printLearnState=False)\n",
"\n",
" # tm.getPredictedState() gets the predicted cells.\n",
" # predictedCells[c][i] represents the state of the i'th cell in the c'th\n",
" # column. To see if a column is predicted, we can simply take the OR\n",
" # across all the cells in that column. In numpy we can do this by taking\n",
" # the max along axis 1.\n",
" print \"\\n\\nThe following columns are predicted by the temporal memory. This\"\n",
" print \"should correspond to columns in the *next* item in the sequence.\"\n",
" predictedCells = tm.getPredictedState()\n",
" print formatRow(predictedCells.max(axis=1).nonzero())"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Networks and Regions\n",
"\n",
"See slides."
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Online Prediction Framework\n",
"\n",
"* CLAModel\n",
"* OPF Client\n",
"* Swarming\n",
"\n",
"# CLAModel\n",
"\n",
"From `examples/opf/clients/hotgym/simple/hotgym.py`"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Model Parameters\n",
"\n",
"`MODEL_PARAMS` have all of the parameters for the CLA model and subcomponents"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Model Params!\n",
"MODEL_PARAMS = {\n",
" # Type of model that the rest of these parameters apply to.\n",
" 'model': \"HTMPrediction\",\n",
"\n",
" # Version that specifies the format of the config.\n",
" 'version': 1,\n",
"\n",
" # Intermediate variables used to compute fields in modelParams and also\n",
" # referenced from the control section.\n",
" 'aggregationInfo': { 'days': 0,\n",
" 'fields': [('consumption', 'sum')],\n",
" 'hours': 1,\n",
" 'microseconds': 0,\n",
" 'milliseconds': 0,\n",
" 'minutes': 0,\n",
" 'months': 0,\n",
" 'seconds': 0,\n",
" 'weeks': 0,\n",
" 'years': 0},\n",
"\n",
" 'predictAheadTime': None,\n",
"\n",
" # Model parameter dictionary.\n",
" 'modelParams': {\n",
" # The type of inference that this model will perform\n",
" 'inferenceType': 'TemporalMultiStep',\n",
"\n",
" 'sensorParams': {\n",
" # Sensor diagnostic output verbosity control;\n",
" # if > 0: sensor region will print out on screen what it's sensing\n",
" # at each step 0: silent; >=1: some info; >=2: more info;\n",
" # >=3: even more info (see compute() in py/regions/RecordSensor.py)\n",
" 'verbosity' : 0,\n",
"\n",
" # Include the encoders we use\n",
" 'encoders': {\n",
" u'timestamp_timeOfDay': {\n",
" 'fieldname': u'timestamp',\n",
" 'name': u'timestamp_timeOfDay',\n",
" 'timeOfDay': (21, 0.5),\n",
" 'type': 'DateEncoder'\n",
" },\n",
" u'timestamp_dayOfWeek': None,\n",
" u'timestamp_weekend': None,\n",
" u'consumption': {\n",
" 'clipInput': True,\n",
" 'fieldname': u'consumption',\n",
" 'maxval': 100.0,\n",
" 'minval': 0.0,\n",
" 'n': 50,\n",
" 'name': u'c1',\n",
" 'type': 'ScalarEncoder',\n",
" 'w': 21\n",
" },\n",
" },\n",
"\n",
" # A dictionary specifying the period for automatically-generated\n",
" # resets from a RecordSensor;\n",
" #\n",
" # None = disable automatically-generated resets (also disabled if\n",
" # all of the specified values evaluate to 0).\n",
" # Valid keys is the desired combination of the following:\n",
" # days, hours, minutes, seconds, milliseconds, microseconds, weeks\n",
" #\n",
" # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),\n",
" #\n",
" # (value generated from SENSOR_AUTO_RESET)\n",
" 'sensorAutoReset' : None,\n",
" },\n",
"\n",
" 'spEnable': True,\n",
"\n",
" 'spParams': {\n",
" # SP diagnostic output verbosity control;\n",
" # 0: silent; >=1: some info; >=2: more info;\n",
" 'spVerbosity' : 0,\n",
"\n",
" # Spatial Pooler implementation selector, see getSPClass\n",
" # in py/regions/SPRegion.py for details\n",
" # 'py' (default), 'cpp' (speed optimized, new)\n",
" 'spatialImp' : 'cpp',\n",
"\n",
" 'globalInhibition': 1,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" 'inputWidth': 0,\n",
"\n",
" # SP inhibition control (absolute value);\n",
" # Maximum number of active columns in the SP region's output (when\n",
" # there are more, the weaker ones are suppressed)\n",
" 'numActiveColumnsPerInhArea': 40,\n",
"\n",
" 'seed': 1956,\n",
"\n",
" # potentialPct\n",
" # What percent of the columns's receptive field is available\n",
" # for potential synapses. At initialization time, we will\n",
" # choose potentialPct * (2*potentialRadius+1)^2\n",
" 'potentialPct': 0.5,\n",
"\n",
" # The default connected threshold. Any synapse whose\n",
" # permanence value is above the connected threshold is\n",
" # a \"connected synapse\", meaning it can contribute to the\n",
" # cell's firing. Typical value is 0.10. Cells whose activity\n",
" # level before inhibition falls below minDutyCycleBeforeInh\n",
" # will have their own internal synPermConnectedCell\n",
" # threshold set below this default value.\n",
" # (This concept applies to both SP and TM and so 'cells'\n",
" # is correct here as opposed to 'columns')\n",
" 'synPermConnected': 0.1,\n",
"\n",
" 'synPermActiveInc': 0.1,\n",
"\n",
" 'synPermInactiveDec': 0.005,\n",
" },\n",
"\n",
" # Controls whether TM is enabled or disabled;\n",
" # TM is necessary for making temporal predictions, such as predicting\n",
" # the next inputs. Without TP, the model is only capable of\n",
" # reconstructing missing sensor inputs (via SP).\n",
" 'tmEnable' : True,\n",
"\n",
" 'tmParams': {\n",
" # TM diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n",
" 'verbosity': 0,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" # The number of cells (i.e., states), allocated per column.\n",
" 'cellsPerColumn': 32,\n",
"\n",
" 'inputWidth': 2048,\n",
"\n",
" 'seed': 1960,\n",
"\n",
" # Temporal Pooler implementation selector (see _getTPClass in\n",
" # CLARegion.py).\n",
" 'temporalImp': 'cpp',\n",
"\n",
" # New Synapse formation count\n",
" # NOTE: If None, use spNumActivePerInhArea\n",
" #\n",
" # TODO: need better explanation\n",
" 'newSynapseCount': 20,\n",
"\n",
" # Maximum number of synapses per segment\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSynapsesPerSegment': 32,\n",
"\n",
" # Maximum number of segments per cell\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSegmentsPerCell': 128,\n",
"\n",
" # Initial Permanence\n",
" # TODO: need better explanation\n",
" 'initialPerm': 0.21,\n",
"\n",
" # Permanence Increment\n",
" 'permanenceInc': 0.1,\n",
"\n",
" # Permanence Decrement\n",
" # If set to None, will automatically default to tpPermanenceInc\n",
" # value.\n",
" 'permanenceDec' : 0.1,\n",
"\n",
" 'globalDecay': 0.0,\n",
"\n",
" 'maxAge': 0,\n",
"\n",
" # Minimum number of active synapses for a segment to be considered\n",
" # during search for the best-matching segments.\n",
" # None=use default\n",
" # Replaces: tpMinThreshold\n",
" 'minThreshold': 9,\n",
"\n",
" # Segment activation threshold.\n",
" # A segment is active if it has >= tpSegmentActivationThreshold\n",
" # connected synapses that are active due to infActiveState\n",
" # None=use default\n",
" # Replaces: tpActivationThreshold\n",
" 'activationThreshold': 12,\n",
"\n",
" 'outputType': 'normal',\n",
"\n",
" # \"Pay Attention Mode\" length. This tells the TM how many new\n",
" # elements to append to the end of a learned sequence at a time.\n",
" # Smaller values are better for datasets with short sequences,\n",
" # higher values are better for datasets with long sequences.\n",
" 'pamLength': 1,\n",
" },\n",
"\n",
" 'clParams': {\n",
" 'regionName' : 'SDRClassifierRegion',\n",
"\n",
" # Classifier diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" 'verbosity' : 0,\n",
"\n",
" # This controls how fast the classifier learns/forgets. Higher values\n",
" # make it adapt faster and forget older patterns faster.\n",
" 'alpha': 0.005,\n",
"\n",
" # This is set after the call to updateConfigFromSubConfig and is\n",
" # computed from the aggregationInfo and predictAheadTime.\n",
" 'steps': '1,5',\n",
"\n",
" 'implementation': 'cpp',\n",
" },\n",
"\n",
" 'trainSPNetOnlyIfRequested': False,\n",
" },\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Dataset Helpers"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/Users/mleborgne/_git/nupic/src/nupic/datafiles/extra/hotgym/hotgym.csv\n",
"\n",
"gym,address,timestamp,consumption\n",
"string,string,datetime,float\n",
"S,,T,\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:00:00.0,5.3\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:15:00.0,5.5\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:30:00.0,5.1\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:45:00.0,5.3\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 01:00:00.0,5.2\n"
]
}
],
"source": [
"from pkg_resources import resource_filename\n",
"\n",
"datasetPath = resource_filename(\"nupic.datafiles\", \"extra/hotgym/hotgym.csv\")\n",
"print datasetPath\n",
"\n",
"with open(datasetPath) as inputFile:\n",
" print\n",
" for _ in xrange(8):\n",
" print inputFile.next().strip()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Loading Data\n",
"\n",
"`FileRecordStream` - file reader for the NuPIC file format (CSV with three header rows, understands datetimes)"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 0), 5.3]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 15), 5.5]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 30), 5.1]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 45), 5.3]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 1, 0), 5.2]\n"
]
}
],
"source": [
"from nupic.data.file_record_stream import FileRecordStream\n",
"\n",
"def getData():\n",
" return FileRecordStream(datasetPath)\n",
"\n",
"data = getData()\n",
"for _ in xrange(5):\n",
" print data.next()"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.frameworks.opf.modelfactory import ModelFactory\n",
"model = ModelFactory.create(MODEL_PARAMS)\n",
"model.enableInference({'predictedField': 'consumption'})"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"input: 5.3\n",
"prediction: 5.3\n",
"input: 5.5\n",
"prediction: 5.5\n",
"input: 5.1\n",
"prediction: 5.36\n",
"input: 5.3\n",
"prediction: 5.1\n",
"input: 5.2\n",
"prediction: 5.342\n",
"input: 5.5\n",
"prediction: 5.2994\n",
"input: 4.5\n",
"prediction: 5.35958\n",
"input: 1.2\n",
"prediction: 4.92\n",
"input: 1.1\n",
"prediction: 1.2\n",
"input: 1.2\n",
"prediction: 1.17\n",
"input: 1.2\n",
"prediction: 1.179\n",
"input: 1.2\n",
"prediction: 1.1853\n",
"input: 1.2\n",
"prediction: 1.18971\n",
"input: 1.2\n",
"prediction: 1.192797\n",
"input: 1.1\n",
"prediction: 1.1949579\n",
"input: 1.2\n",
"prediction: 1.16647053\n",
"input: 1.1\n",
"prediction: 1.176529371\n",
"input: 1.2\n",
"prediction: 1.1535705597\n",
"input: 1.2\n",
"prediction: 1.16749939179\n",
"input: 1.1\n",
"prediction: 1.17724957425\n",
"input: 1.2\n",
"prediction: 1.15407470198\n",
"input: 6.0\n",
"prediction: 1.16785229138\n",
"input: 7.9\n",
"prediction: 5.551706\n",
"input: 8.4\n",
"prediction: 6.2561942\n",
"input: 10.6\n",
"prediction: 6.89933594\n",
"input: 12.4\n",
"prediction: 10.6\n",
"input: 12.1\n",
"prediction: 12.4\n",
"input: 12.4\n",
"prediction: 12.31\n",
"input: 11.4\n",
"prediction: 12.337\n",
"input: 11.2\n",
"prediction: 10.84\n",
"input: 10.8\n",
"prediction: 10.948\n",
"input: 12.0\n",
"prediction: 10.9036\n",
"input: 11.8\n",
"prediction: 11.23252\n",
"input: 11.9\n",
"prediction: 11.402764\n",
"input: 11.4\n",
"prediction: 11.5519348\n",
"input: 11.0\n",
"prediction: 11.50635436\n",
"input: 9.8\n",
"prediction: 11.354448052\n",
"input: 9.8\n",
"prediction: 10.8881136364\n",
"input: 10.8\n",
"prediction: 10.5616795455\n",
"input: 11.1\n",
"prediction: 10.6331756818\n",
"input: 11.1\n",
"prediction: 10.7732229773\n",
"input: 11.0\n",
"prediction: 10.8712560841\n",
"input: 10.7\n",
"prediction: 10.9098792589\n",
"input: 10.6\n",
"prediction: 10.8469154812\n",
"input: 10.3\n",
"prediction: 10.7728408368\n",
"input: 10.1\n",
"prediction: 10.6309885858\n",
"input: 12.9\n",
"prediction: 10.4716920101\n",
"input: 10.5\n",
"prediction: 10.4716920101\n",
"input: 9.7\n",
"prediction: 10.480184407\n",
"input: 9.7\n",
"prediction: 10.2461290849\n",
"input: 9.2\n",
"prediction: 10.0822903594\n",
"input: 9.2\n",
"prediction: 9.81760325161\n",
"input: 9.2\n",
"prediction: 9.63232227613\n",
"input: 9.3\n",
"prediction: 9.50262559329\n",
"input: 9.1\n",
"prediction: 9.4418379153\n",
"input: 9.0\n",
"prediction: 9.33928654071\n",
"input: 8.9\n",
"prediction: 9.2375005785\n",
"input: 9.0\n",
"prediction: 9.13625040495\n",
"input: 8.9\n",
"prediction: 9.09537528346\n",
"input: 8.9\n",
"prediction: 9.03676269843\n",
"input: 9.0\n",
"prediction: 8.9957338889\n",
"input: 9.2\n",
"prediction: 8.99701372223\n",
"input: 10.0\n",
"prediction: 9.05790960556\n",
"input: 10.7\n",
"prediction: 9.34053672389\n",
"input: 8.9\n",
"prediction: 9.74837570672\n",
"input: 9.0\n",
"prediction: 9.49386299471\n",
"input: 9.0\n",
"prediction: 9.34570409629\n",
"input: 9.3\n",
"prediction: 9.24199286741\n",
"input: 9.3\n",
"prediction: 9.25939500718\n",
"input: 9.1\n",
"prediction: 9.27157650503\n",
"input: 9.1\n",
"prediction: 9.22010355352\n",
"input: 9.1\n",
"prediction: 9.18407248746\n",
"input: 9.2\n",
"prediction: 9.15885074122\n",
"input: 9.4\n",
"prediction: 9.17119551886\n",
"input: 9.3\n",
"prediction: 9.2398368632\n",
"input: 9.3\n",
"prediction: 9.25788580424\n",
"input: 9.1\n",
"prediction: 9.27052006297\n",
"input: 9.1\n",
"prediction: 9.21936404408\n",
"input: 11.0\n",
"prediction: 9.18355483085\n",
"input: 9.0\n",
"prediction: 9.7284883816\n",
"input: 8.6\n",
"prediction: 9.50994186712\n",
"input: 3.0\n",
"prediction: 9.50994186712\n",
"input: 1.3\n",
"prediction: 4.344\n",
"input: 1.2\n",
"prediction: 1.20749660397\n",
"input: 1.3\n",
"prediction: 1.20524762278\n",
"input: 1.3\n",
"prediction: 1.23367333594\n",
"input: 1.3\n",
"prediction: 1.25357133516\n",
"input: 1.2\n",
"prediction: 1.26749993461\n",
"input: 1.3\n",
"prediction: 1.24724995423\n",
"input: 1.2\n",
"prediction: 1.26307496796\n",
"input: 1.3\n",
"prediction: 1.24415247757\n",
"input: 1.2\n",
"prediction: 1.2609067343\n",
"input: 1.3\n",
"prediction: 1.24263471401\n",
"input: 1.2\n",
"prediction: 1.25984429981\n",
"input: 1.1\n",
"prediction: 1.24189100987\n",
"input: 2.3\n",
"prediction: 1.19932370691\n",
"input: 5.5\n",
"prediction: 3.7308\n",
"input: 5.5\n",
"prediction: 6.8366746106\n",
"input: 5.8\n",
"prediction: 6.43567222742\n",
"input: 5.7\n",
"prediction: 6.24497055919\n"
]
}
],
"source": [
"data = getData()\n",
"for _ in xrange(100):\n",
" record = dict(zip(data.getFieldNames(), data.next()))\n",
" print \"input: \", record[\"consumption\"]\n",
" result = model.run(record)\n",
" print \"prediction: \", result.inferences[\"multiStepBestPredictions\"][1]"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"5-step prediction: 1.19932370691\n"
]
}
],
"source": [
"print \"5-step prediction: \", result.inferences[\"multiStepBestPredictions\"][5]"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Anomaly Score"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Model Params!\n",
"MODEL_PARAMS = {\n",
" # Type of model that the rest of these parameters apply to.\n",
" 'model': \"HTMPrediction\",\n",
"\n",
" # Version that specifies the format of the config.\n",
" 'version': 1,\n",
"\n",
" # Intermediate variables used to compute fields in modelParams and also\n",
" # referenced from the control section.\n",
" 'aggregationInfo': { 'days': 0,\n",
" 'fields': [('consumption', 'sum')],\n",
" 'hours': 1,\n",
" 'microseconds': 0,\n",
" 'milliseconds': 0,\n",
" 'minutes': 0,\n",
" 'months': 0,\n",
" 'seconds': 0,\n",
" 'weeks': 0,\n",
" 'years': 0},\n",
"\n",
" 'predictAheadTime': None,\n",
"\n",
" # Model parameter dictionary.\n",
" 'modelParams': {\n",
" # The type of inference that this model will perform\n",
" 'inferenceType': 'TemporalAnomaly',\n",
"\n",
" 'sensorParams': {\n",
" # Sensor diagnostic output verbosity control;\n",
" # if > 0: sensor region will print out on screen what it's sensing\n",
" # at each step 0: silent; >=1: some info; >=2: more info;\n",
" # >=3: even more info (see compute() in py/regions/RecordSensor.py)\n",
" 'verbosity' : 0,\n",
"\n",
" # Include the encoders we use\n",
" 'encoders': {\n",
" u'timestamp_timeOfDay': {\n",
" 'fieldname': u'timestamp',\n",
" 'name': u'timestamp_timeOfDay',\n",
" 'timeOfDay': (21, 0.5),\n",
" 'type': 'DateEncoder'},\n",
" u'timestamp_dayOfWeek': None,\n",
" u'timestamp_weekend': None,\n",
" u'consumption': {\n",
" 'clipInput': True,\n",
" 'fieldname': u'consumption',\n",
" 'maxval': 100.0,\n",
" 'minval': 0.0,\n",
" 'n': 50,\n",
" 'name': u'c1',\n",
" 'type': 'ScalarEncoder',\n",
" 'w': 21},},\n",
"\n",
" # A dictionary specifying the period for automatically-generated\n",
" # resets from a RecordSensor;\n",
" #\n",
" # None = disable automatically-generated resets (also disabled if\n",
" # all of the specified values evaluate to 0).\n",
" # Valid keys is the desired combination of the following:\n",
" # days, hours, minutes, seconds, milliseconds, microseconds, weeks\n",
" #\n",
" # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),\n",
" #\n",
" # (value generated from SENSOR_AUTO_RESET)\n",
" 'sensorAutoReset' : None,\n",
" },\n",
"\n",
" 'spEnable': True,\n",
"\n",
" 'spParams': {\n",
" # SP diagnostic output verbosity control;\n",
" # 0: silent; >=1: some info; >=2: more info;\n",
" 'spVerbosity' : 0,\n",
"\n",
" # Spatial Pooler implementation selector, see getSPClass\n",
" # in py/regions/SPRegion.py for details\n",
" # 'py' (default), 'cpp' (speed optimized, new)\n",
" 'spatialImp' : 'cpp',\n",
"\n",
" 'globalInhibition': 1,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" 'inputWidth': 0,\n",
"\n",
" # SP inhibition control (absolute value);\n",
" # Maximum number of active columns in the SP region's output (when\n",
" # there are more, the weaker ones are suppressed)\n",
" 'numActiveColumnsPerInhArea': 40,\n",
"\n",
" 'seed': 1956,\n",
"\n",
" # potentialPct\n",
" # What percent of the columns's receptive field is available\n",
" # for potential synapses. At initialization time, we will\n",
" # choose potentialPct * (2*potentialRadius+1)^2\n",
" 'potentialPct': 0.5,\n",
"\n",
" # The default connected threshold. Any synapse whose\n",
" # permanence value is above the connected threshold is\n",
" # a \"connected synapse\", meaning it can contribute to the\n",
" # cell's firing. Typical value is 0.10. Cells whose activity\n",
" # level before inhibition falls below minDutyCycleBeforeInh\n",
" # will have their own internal synPermConnectedCell\n",
" # threshold set below this default value.\n",
" # (This concept applies to both SP and TM and so 'cells'\n",
" # is correct here as opposed to 'columns')\n",
" 'synPermConnected': 0.1,\n",
"\n",
" 'synPermActiveInc': 0.1,\n",
"\n",
" 'synPermInactiveDec': 0.005,\n",
" },\n",
"\n",
" # Controls whether TM is enabled or disabled;\n",
" # TM is necessary for making temporal predictions, such as predicting\n",
" # the next inputs. Without TP, the model is only capable of\n",
" # reconstructing missing sensor inputs (via SP).\n",
" 'tmEnable' : True,\n",
"\n",
" 'tmParams': {\n",
" # TM diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n",
" 'verbosity': 0,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" # The number of cells (i.e., states), allocated per column.\n",
" 'cellsPerColumn': 32,\n",
"\n",
" 'inputWidth': 2048,\n",
"\n",
" 'seed': 1960,\n",
"\n",
" # Temporal Pooler implementation selector (see _getTPClass in\n",
" # CLARegion.py).\n",
" 'temporalImp': 'cpp',\n",
"\n",
" # New Synapse formation count\n",
" # NOTE: If None, use spNumActivePerInhArea\n",
" #\n",
" # TODO: need better explanation\n",
" 'newSynapseCount': 20,\n",
"\n",
" # Maximum number of synapses per segment\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSynapsesPerSegment': 32,\n",
"\n",
" # Maximum number of segments per cell\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSegmentsPerCell': 128,\n",
"\n",
" # Initial Permanence\n",
" # TODO: need better explanation\n",
" 'initialPerm': 0.21,\n",
"\n",
" # Permanence Increment\n",
" 'permanenceInc': 0.1,\n",
"\n",
" # Permanence Decrement\n",
" # If set to None, will automatically default to tpPermanenceInc\n",
" # value.\n",
" 'permanenceDec' : 0.1,\n",
"\n",
" 'globalDecay': 0.0,\n",
"\n",
" 'maxAge': 0,\n",
"\n",
" # Minimum number of active synapses for a segment to be considered\n",
" # during search for the best-matching segments.\n",
" # None=use default\n",
" # Replaces: tpMinThreshold\n",
" 'minThreshold': 9,\n",
"\n",
" # Segment activation threshold.\n",
" # A segment is active if it has >= tpSegmentActivationThreshold\n",
" # connected synapses that are active due to infActiveState\n",
" # None=use default\n",
" # Replaces: tpActivationThreshold\n",
" 'activationThreshold': 12,\n",
"\n",
" 'outputType': 'normal',\n",
"\n",
" # \"Pay Attention Mode\" length. This tells the TM how many new\n",
" # elements to append to the end of a learned sequence at a time.\n",
" # Smaller values are better for datasets with short sequences,\n",
" # higher values are better for datasets with long sequences.\n",
" 'pamLength': 1,\n",
" },\n",
"\n",
" 'clParams': {\n",
" 'regionName' : 'SDRClassifierRegion',\n",
"\n",
" # Classifier diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" 'verbosity' : 0,\n",
"\n",
" # This controls how fast the classifier learns/forgets. Higher values\n",
" # make it adapt faster and forget older patterns faster.\n",
" 'alpha': 0.005,\n",
"\n",
" # This is set after the call to updateConfigFromSubConfig and is\n",
" # computed from the aggregationInfo and predictAheadTime.\n",
" 'steps': '1',\n",
"\n",
" 'implementation': 'cpp',\n",
" },\n",
"\n",
" 'anomalyParams': {\n",
" u'anomalyCacheRecords': None,\n",
" u'autoDetectThreshold': None,\n",
" u'autoDetectWaitRecords': 2184\n",
" },\n",
"\n",
" 'trainSPNetOnlyIfRequested': False,\n",
" },\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.frameworks.opf.modelfactory import ModelFactory\n",
"model = ModelFactory.create(MODEL_PARAMS)\n",
"model.enableInference({'predictedField': 'consumption'})"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"input: 5.3\n",
"prediction: 5.3\n",
"input: 5.5\n",
"prediction: 5.5\n",
"input: 5.1\n",
"prediction: 5.36\n",
"input: 5.3\n",
"prediction: 5.1\n",
"input: 5.2\n",
"prediction: 5.342\n"
]
}
],
"source": [
"data = getData()\n",
"for _ in xrange(5):\n",
" record = dict(zip(data.getFieldNames(), data.next()))\n",
" print \"input: \", record[\"consumption\"]\n",
" result = model.run(record)\n",
" print \"prediction: \", result.inferences[\"multiStepBestPredictions\"][1]"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ModelResult(\tpredictionNumber=4\n",
"\trawInput={'timestamp': datetime.datetime(2010, 7, 2, 1, 0), 'gym': 'Balgowlah Platinum', 'consumption': 5.2, 'address': 'Shop 67 197-215 Condamine Street Balgowlah 2093'}\n",
"\tsensorInput=SensorInput(\tdataRow=(5.2, 1.0)\n",
"\tdataDict={'timestamp': datetime.datetime(2010, 7, 2, 1, 0), 'gym': 'Balgowlah Platinum', 'consumption': 5.2, 'address': 'Shop 67 197-215 Condamine Street Balgowlah 2093'}\n",
"\tdataEncodings=[array([ 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32), array([ 0., 0., 0., ..., 0., 0., 0.], dtype=float32)]\n",
"\tsequenceReset=0.0\n",
"\tcategory=-1\n",
")\n",
"\tinferences={'multiStepPredictions': {1: {5.1: 0.0088801263517415546, 5.2: 0.010775254623541418, 5.341999999999999: 0.98034461902471692}}, 'multiStepBucketLikelihoods': {1: {1: 0.0088801263517415546, 2: 0.98034461902471692}}, 'multiStepBestPredictions': {1: 5.341999999999999}, 'anomalyLabel': '[]', 'anomalyScore': 0.40000001}\n",
"\tmetrics=None\n",
"\tpredictedFieldIdx=0\n",
"\tpredictedFieldName=consumption\n",
"\tclassifierInput=ClassifierInput(\tdataRow=5.2\n",
"\tbucketIndex=2\n",
")\n",
")\n"
]
}
],
"source": [
"print result"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"anomaly score: 0.4\n"
]
}
],
"source": [
"print \"anomaly score: \", result.inferences[\"anomalyScore\"]"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"__See Subutai's talk for more info on anomaly detection!__\n",
"\n",
"# Built-in OPF Clients\n",
"\n",
"`python examples/opf/bin/OpfRunExperiment.py examples/opf/experiments/multistep/hotgym/`\n",
"\n",
"Outputs `examples/opf/experiments/multistep/hotgym/inference/DefaultTask.TemporalMultiStep.predictionLog.csv`\n",
"\n",
"`python bin/run_swarm.py examples/opf/experiments/multistep/hotgym/permutations.py`\n",
"\n",
"Outputs `examples/opf/experiments/multistep/hotgym/model_0/description.py`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
""
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2.0
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
},
"nbformat": 4,
"nbformat_minor": 0
} | 62,682 | Python | .py | 2,051 | 25.465627 | 339 | 0.516559 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,867 | serializable.py | numenta_nupic-legacy/src/nupic/serializable.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
class Serializable(object):
"""
Serializable base class establishing
:meth:`~nupic.serializable.Serializable.read` and
:meth:`~nupic.serializable.Serializable.write` abstract methods,
:meth:`.readFromFile` and :meth:`.writeToFile` concrete methods to support
serialization with Cap'n Proto.
"""
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def getSchema(cls):
"""
Get Cap'n Proto schema.
..warning: This is an abstract method. Per abc protocol, attempts to subclass
without overriding will fail.
@returns Cap'n Proto schema
"""
pass
@classmethod
@abstractmethod
def read(cls, proto):
"""
Create a new object initialized from Cap'n Proto obj.
Note: This is an abstract method. Per abc protocol, attempts to subclass
without overriding will fail.
:param proto: Cap'n Proto obj
:return: Obj initialized from proto
"""
pass
@abstractmethod
def write(self, proto):
"""
Write obj instance to Cap'n Proto object
.. warning: This is an abstract method. Per abc protocol, attempts to
subclass without overriding will fail.
:param proto: Cap'n Proto obj
"""
pass
@classmethod
def readFromFile(cls, f, packed=True):
"""
Read serialized object from file.
:param f: input file
:param packed: If true, will assume content is packed
:return: first-class instance initialized from proto obj
"""
# Get capnproto schema from instance
schema = cls.getSchema()
# Read from file
if packed:
proto = schema.read_packed(f)
else:
proto = schema.read(f)
# Return first-class instance initialized from proto obj
return cls.read(proto)
def writeToFile(self, f, packed=True):
"""
Write serialized object to file.
:param f: output file
:param packed: If true, will pack contents.
"""
# Get capnproto schema from instance
schema = self.getSchema()
# Construct new message, otherwise refered to as `proto`
proto = schema.new_message()
# Populate message w/ `write()` instance method
self.write(proto)
# Finally, write to file
if packed:
proto.write_packed(f)
else:
proto.write(f)
| 3,284 | Python | .py | 94 | 30.648936 | 82 | 0.682796 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,868 | utils.py | numenta_nupic-legacy/src/nupic/utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
utils.py are a collection of methods that can be reused by different classes
in our codebase.
"""
import numbers
from nupic.serializable import Serializable
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.movingaverage_capnp import MovingAverageProto
class MovingAverage(Serializable):
"""Helper class for computing moving average and sliding window"""
def __init__(self, windowSize, existingHistoricalValues=None):
"""
new instance of MovingAverage, so method .next() can be used
@param windowSize - length of sliding window
@param existingHistoricalValues - construct the object with already
some values in it.
"""
if not isinstance(windowSize, numbers.Integral):
raise TypeError("MovingAverage - windowSize must be integer type")
if windowSize <= 0:
raise ValueError("MovingAverage - windowSize must be >0")
self.windowSize = windowSize
if existingHistoricalValues is not None:
self.slidingWindow = existingHistoricalValues[
len(existingHistoricalValues)-windowSize:]
else:
self.slidingWindow = []
self.total = float(sum(self.slidingWindow))
@staticmethod
def compute(slidingWindow, total, newVal, windowSize):
"""Routine for computing a moving average.
@param slidingWindow a list of previous values to use in computation that
will be modified and returned
@param total the sum of the values in slidingWindow to be used in the
calculation of the moving average
@param newVal a new number compute the new windowed average
@param windowSize how many values to use in the moving window
@returns an updated windowed average, the modified input slidingWindow list,
and the new total sum of the sliding window
"""
if len(slidingWindow) == windowSize:
total -= slidingWindow.pop(0)
slidingWindow.append(newVal)
total += newVal
return float(total) / len(slidingWindow), slidingWindow, total
def next(self, newValue):
"""Instance method wrapper around compute."""
newAverage, self.slidingWindow, self.total = self.compute(
self.slidingWindow, self.total, newValue, self.windowSize)
return newAverage
def getSlidingWindow(self):
return self.slidingWindow
def getCurrentAvg(self):
"""get current average"""
return float(self.total) / len(self.slidingWindow)
# TODO obsoleted by capnp, will be removed in future
def __setstate__(self, state):
""" for loading this object"""
self.__dict__.update(state)
if not hasattr(self, "slidingWindow"):
self.slidingWindow = []
if not hasattr(self, "total"):
self.total = 0
self.slidingWindow = sum(self.slidingWindow)
def __eq__(self, o):
return (isinstance(o, MovingAverage) and
o.slidingWindow == self.slidingWindow and
o.total == self.total and
o.windowSize == self.windowSize)
def __call__(self, value):
return self.next(value)
@classmethod
def read(cls, proto):
movingAverage = object.__new__(cls)
movingAverage.windowSize = proto.windowSize
movingAverage.slidingWindow = list(proto.slidingWindow)
movingAverage.total = proto.total
return movingAverage
def write(self, proto):
proto.windowSize = self.windowSize
proto.slidingWindow = self.slidingWindow
proto.total = self.total
@classmethod
def getSchema(cls):
return MovingAverageProto
| 4,474 | Python | .py | 109 | 36.40367 | 80 | 0.709834 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,869 | __init__.py | numenta_nupic-legacy/src/nupic/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
__import__("pkg_resources").declare_namespace(__name__)
| 1,038 | Python | .py | 21 | 48.380952 | 72 | 0.667323 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,870 | simple_server.py | numenta_nupic-legacy/src/nupic/simple_server.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple web server for interacting with NuPIC.
Note: Requires web.py to run (install using '$ pip install web.py')
"""
import os
import sys
# The following loop removes the nupic package from the
# PythonPath (sys.path). This is necessary in order to let web
# import the built in math module rather than defaulting to
# nupic.math
while True:
try:
sys.path.remove(os.path.dirname(os.path.realpath(__file__)))
except:
break
import datetime
import json
import web
from nupic.frameworks.opf.model_factory import ModelFactory
g_models = {}
urls = (
# Web UI
"/models", "ModelHandler",
r"/models/([-\w]*)", "ModelHandler",
r"/models/([-\w]*)/run", "ModelRunner",
)
class ModelHandler(object):
def GET(self):
"""
/models
returns:
[model1, model2, model3, ...] list of model names
"""
global g_models
return json.dumps({"models": g_models.keys()})
def POST(self, name):
"""
/models/{name}
schema:
{
"modelParams": dict containing model parameters
"predictedFieldName": str
}
returns:
{"success":name}
"""
global g_models
data = json.loads(web.data())
modelParams = data["modelParams"]
predictedFieldName = data["predictedFieldName"]
if name in g_models.keys():
raise web.badrequest("Model with name <%s> already exists" % name)
model = ModelFactory.create(modelParams)
model.enableInference({'predictedField': predictedFieldName})
g_models[name] = model
return json.dumps({"success": name})
class ModelRunner(object):
def POST(self, name):
"""
/models/{name}/run
schema:
{
predictedFieldName: value
timestamp: %m/%d/%y %H:%M
}
NOTE: predictedFieldName MUST be the same name specified when
creating the model.
returns:
{
"predictionNumber":<number of record>,
"anomalyScore":anomalyScore
}
"""
global g_models
data = json.loads(web.data())
data["timestamp"] = datetime.datetime.strptime(
data["timestamp"], "%m/%d/%y %H:%M")
if name not in g_models.keys():
raise web.notfound("Model with name <%s> does not exist." % name)
modelResult = g_models[name].run(data)
predictionNumber = modelResult.predictionNumber
anomalyScore = modelResult.inferences["anomalyScore"]
return json.dumps({"predictionNumber": predictionNumber,
"anomalyScore": anomalyScore})
web.config.debug = False
app = web.application(urls, globals())
if __name__ == "__main__":
app.run()
| 3,569 | Python | .py | 108 | 28.87963 | 72 | 0.665012 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,871 | filters.py | numenta_nupic-legacy/src/nupic/data/filters.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from datetime import datetime, timedelta
class AutoResetFilter(object):
"""Initial implementation of auto-reset is fairly simple. You just give it a
time interval. Like aggregation, we the first time period start with the
time of the first record (t0) and signal a reset at the first record on or after
t0 + interval, t0 + 2 * interval, etc.
We could get much fancier than this, but it is not clear what will be
needed. For example, if you want a reset every day, you might expect the
period to start at midnight. We also don't handle variable-time periods --
month and year.
"""
def __init__(self, interval=None, datetimeField=None):
self.setInterval(interval, datetimeField)
def setInterval(self, interval=None, datetimeField=None):
if interval is not None:
assert isinstance(interval, timedelta)
self.interval = interval
self.datetimeField = datetimeField
self.lastAutoReset = None
def process(self, data):
if self.interval is None:
return True # no more data needed
if self.datetimeField is None:
self._getDatetimeField(data)
date = data[self.datetimeField]
if data['_reset'] != 0:
self.lastAutoReset = date
return True # no more data needed
if self.lastAutoReset is None:
self.lastAutoReset = date
return True
if date >= self.lastAutoReset + self.interval:
# might have skipped several intervals
while date >= self.lastAutoReset + self.interval:
self.lastAutoReset += self.interval
data['_reset'] = 1
return True # no more data needed
elif date < self.lastAutoReset:
# sequence went back in time!
self.lastAutoReset = date
return True
def _getDatetimeField(self, data):
datetimeField = None
assert isinstance(data, dict)
for (name, value) in data.items():
if isinstance(value, datetime):
datetimeField = name
break
if datetimeField is None:
raise RuntimeError("Autoreset requested for the data but there is no date field")
self.datetimeField = datetimeField
def getShortName(self):
if interval is not None:
s = "autoreset_%d_%d" % (interval.days, interval.seconds)
else:
s = "autoreset_none"
return s
class DeltaFilter(object):
def __init__(self, origField, deltaField):
"""Add a delta field to the data.
"""
self.origField = origField
self.deltaField = deltaField
self.previousValue = None
self.rememberReset = False
def process(self, data):
val = data[self.origField]
if self.previousValue is None or data['_reset']:
self.previousValue = val
self.rememberReset = data['_reset']
return False
# We have a delta
delta = val - self.previousValue
self.previousValue = val
if isinstance(delta, timedelta):
data[self.deltaField] = float(delta.days * 24 * 3600) + \
float(delta.seconds) + float(delta.microseconds) * 1.0e-6
else:
data[self.deltaField] = float(delta)
if self.rememberReset:
data['_reset'] = 1
self.rememberReset = False
return True
def getShortName(self):
return "delta_%s" % self.origField
| 4,172 | Python | .py | 105 | 35.066667 | 87 | 0.688273 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,872 | record_stream.py | numenta_nupic-legacy/src/nupic/data/record_stream.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Interface for different types of storages (file, hbase, rio, etc)."""
from abc import ABCMeta, abstractmethod
import datetime
from nupic.data.field_meta import FieldMetaSpecial
def _getFieldIndexBySpecial(fields, special):
""" Return index of the field matching the field meta special value.
:param fields: sequence of nupic.data.fieldmeta.FieldMetaInfo objects
representing the fields of a stream
:param special: one of the special field attribute values from
nupic.data.fieldmeta.FieldMetaSpecial
:returns: first zero-based index of the field tagged with the target field
meta special attribute; None if no such field
"""
for i, field in enumerate(fields):
if field.special == special:
return i
return None
class ModelRecordEncoder(object):
"""Encodes metric data input rows for consumption by OPF models. See
the `ModelRecordEncoder.encode` method for more details.
"""
def __init__(self, fields, aggregationPeriod=None):
"""
:param fields: non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo
objects corresponding to fields in input rows.
:param aggregationPeriod: (dict) aggregation period of the record stream
containing 'months' and 'seconds'. The months is always an integer
and seconds is a floating point. Only one is allowed to be non-zero at a
time. If there is no aggregation associated with the stream, pass None.
Typically, a raw file or hbase stream will NOT have any aggregation info,
but subclasses of RecordStreamIface, like StreamReader, will and will
provide the aggregation period. This is used by the encode method to
assign a record number to a record given its timestamp and the aggregation
interval.
"""
if not fields:
raise ValueError('fields arg must be non-empty, but got %r' % (fields,))
self._fields = fields
self._aggregationPeriod = aggregationPeriod
self._sequenceId = -1
self._fieldNames = tuple(f.name for f in fields)
self._categoryFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.category)
self._resetFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.reset)
self._sequenceFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.sequence)
self._timestampFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.timestamp)
self._learningFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.learning)
def rewind(self):
"""Put us back at the beginning of the file again """
self._sequenceId = -1
def encode(self, inputRow):
"""Encodes the given input row as a dict, with the
keys being the field names. This also adds in some meta fields:
'_category': The value from the category field (if any)
'_reset': True if the reset field was True (if any)
'_sequenceId': the value from the sequenceId field (if any)
:param inputRow: sequence of values corresponding to a single input metric
data row
:rtype: dict
"""
# Create the return dict
result = dict(zip(self._fieldNames, inputRow))
# Add in the special fields
if self._categoryFieldIndex is not None:
# category value can be an int or a list
if isinstance(inputRow[self._categoryFieldIndex], int):
result['_category'] = [inputRow[self._categoryFieldIndex]]
else:
result['_category'] = (inputRow[self._categoryFieldIndex]
if inputRow[self._categoryFieldIndex]
else [None])
else:
result['_category'] = [None]
if self._resetFieldIndex is not None:
result['_reset'] = int(bool(inputRow[self._resetFieldIndex]))
else:
result['_reset'] = 0
if self._learningFieldIndex is not None:
result['_learning'] = int(bool(inputRow[self._learningFieldIndex]))
result['_timestampRecordIdx'] = None
if self._timestampFieldIndex is not None:
result['_timestamp'] = inputRow[self._timestampFieldIndex]
# Compute the record index based on timestamp
result['_timestampRecordIdx'] = self._computeTimestampRecordIdx(
inputRow[self._timestampFieldIndex])
else:
result['_timestamp'] = None
# -----------------------------------------------------------------------
# Figure out the sequence ID
hasReset = self._resetFieldIndex is not None
hasSequenceId = self._sequenceFieldIndex is not None
if hasReset and not hasSequenceId:
# Reset only
if result['_reset']:
self._sequenceId += 1
sequenceId = self._sequenceId
elif not hasReset and hasSequenceId:
sequenceId = inputRow[self._sequenceFieldIndex]
result['_reset'] = int(sequenceId != self._sequenceId)
self._sequenceId = sequenceId
elif hasReset and hasSequenceId:
sequenceId = inputRow[self._sequenceFieldIndex]
else:
sequenceId = 0
if sequenceId is not None:
result['_sequenceId'] = hash(sequenceId)
else:
result['_sequenceId'] = None
return result
def _computeTimestampRecordIdx(self, recordTS):
""" Give the timestamp of a record (a datetime object), compute the record's
timestamp index - this is the timestamp divided by the aggregation period.
Parameters:
------------------------------------------------------------------------
recordTS: datetime instance
retval: record timestamp index, or None if no aggregation period
"""
if self._aggregationPeriod is None:
return None
# Base record index on number of elapsed months if aggregation is in
# months
if self._aggregationPeriod['months'] > 0:
assert self._aggregationPeriod['seconds'] == 0
result = int(
(recordTS.year * 12 + (recordTS.month-1)) /
self._aggregationPeriod['months'])
# Base record index on elapsed seconds
elif self._aggregationPeriod['seconds'] > 0:
delta = recordTS - datetime.datetime(year=1, month=1, day=1)
deltaSecs = delta.days * 24 * 60 * 60 \
+ delta.seconds \
+ delta.microseconds / 1000000.0
result = int(deltaSecs / self._aggregationPeriod['seconds'])
else:
result = None
return result
class RecordStreamIface(object):
"""
This is the interface for the record input/output storage classes.
"""
__metaclass__ = ABCMeta
def __init__(self):
# Will be initialized on-demand in getNextRecordDict with a
# ModelRecordEncoder instance, once encoding metadata is available
self._modelRecordEncoder = None
@abstractmethod
def close(self):
""" Close the stream
"""
def rewind(self):
"""Put us back at the beginning of the file again. """
if self._modelRecordEncoder is not None:
self._modelRecordEncoder.rewind()
@abstractmethod
def getNextRecord(self, useCache=True):
"""
Returns next available data record from the storage. If ``useCache`` is
``False``, then don't read ahead and don't cache any records.
:return: a data row (a list or tuple) if available; None, if no more records
in the table (End of Stream - EOS); empty sequence (list or tuple)
when timing out while waiting for the next record.
"""
def getNextRecordDict(self):
"""Returns next available data record from the storage as a dict, with the
keys being the field names. This also adds in some meta fields:
- ``_category``: The value from the category field (if any)
- ``_reset``: True if the reset field was True (if any)
- ``_sequenceId``: the value from the sequenceId field (if any)
"""
values = self.getNextRecord()
if values is None:
return None
if not values:
return dict()
if self._modelRecordEncoder is None:
self._modelRecordEncoder = ModelRecordEncoder(
fields=self.getFields(),
aggregationPeriod=self.getAggregationMonthsAndSeconds())
return self._modelRecordEncoder.encode(values)
def getAggregationMonthsAndSeconds(self):
"""
Returns the aggregation period of the record stream as a dict
containing 'months' and 'seconds'. The months is always an integer and
seconds is a floating point. Only one is allowed to be non-zero.
If there is no aggregation associated with the stream, returns None.
Typically, a raw file or hbase stream will NOT have any aggregation info,
but subclasses of :class:`~nupic.data.record_stream.RecordStreamIface`, like
:class:`~nupic.data.stream_reader.StreamReader`, will and will return the
aggregation period from this call. This call is used by
:meth:`getNextRecordDict` to assign a record number to a record given its
timestamp and the aggregation interval.
:returns: ``None``
"""
return None
@abstractmethod
def getNextRecordIdx(self):
"""
:returns: (int) index of the record that will be read next from
:meth:`getNextRecord`
"""
@abstractmethod
def appendRecord(self, record):
"""
Saves the record in the underlying storage. Should be implemented in
subclasses.
:param record: (object) to store
"""
@abstractmethod
def appendRecords(self, records, progressCB=None):
"""
Saves multiple records in the underlying storage. Should be implemented in
subclasses.
:param records: (list) of objects to store
:param progressCB: (func) called after each appension
"""
@abstractmethod
def getBookmark(self):
"""Returns an anchor to the current position in the data. Passing this
anchor to the constructor makes the current position to be the first
returned record. If record is no longer in the storage, the first available
after it will be returned.
:returns: anchor to current position in the data.
"""
@abstractmethod
def recordsExistAfter(self, bookmark):
"""
:param bookmark: (int) where to start
:returns: True if there are records left after the bookmark.
"""
@abstractmethod
def seekFromEnd(self, numRecords):
"""
:param numRecords: (int) number of records from the end.
:returns: (int) a bookmark numRecords from the end of the stream.
"""
@abstractmethod
def getStats(self):
"""
:returns: storage stats (like min and max values of the fields).
"""
def getFieldMin(self, fieldName):
"""
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get min
:returns: current minimum value for the field ``fieldName``.
"""
stats = self.getStats()
if stats == None:
return None
minValues = stats.get('min', None)
if minValues == None:
return None
index = self.getFieldNames().index(fieldName)
return minValues[index]
def getFieldMax(self, fieldName):
"""
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get max
:returns: current maximum value for the field ``fieldName``.
"""
stats = self.getStats()
if stats == None:
return None
maxValues = stats.get('max', None)
if maxValues == None:
return None
index = self.getFieldNames().index(fieldName)
return maxValues[index]
  @abstractmethod
  def clearStats(self):
    """Resets stats collected so far."""
  @abstractmethod
  def getError(self):
    """:returns: errors saved in the storage (implementation-defined)."""
  @abstractmethod
  def setError(self, error):
    """
    Saves specified error in the storage.

    :param error: Error to store.
    """
  @abstractmethod
  def isCompleted(self):
    """
    :returns: (bool) True if all records are already in the storage or False
        if more records are expected.
    """
  @abstractmethod
  def setCompleted(self, completed):
    """
    Marks the stream completed.

    :param completed: (bool) is completed?
    """
  @abstractmethod
  def getFieldNames(self):
    """
    :returns: (list) of field names associated with the data.
    """
  @abstractmethod
  def getFields(self):
    """
    :returns: (list) of :class:`nupic.data.fieldmeta.FieldMetaInfo` objects for
        each field in the stream. Might be None, if that information is provided
        externally (through the `Stream Definition <stream-def.html>`_,
        for example).
    """
  def getResetFieldIdx(self):
    """
    :returns: (int) index of the ``reset`` field; ``None`` if no such field.
    """
    # Delegates to the module-level special-field lookup helper.
    return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.reset)
  def getTimestampFieldIdx(self):
    """
    :returns: (int) index of the ``timestamp`` field; ``None`` if no such
        field (see `_getFieldIndexBySpecial`).
    """
    return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.timestamp)
  def getSequenceIdFieldIdx(self):
    """
    :returns: (int) index of the ``sequenceId`` field; ``None`` if no such
        field (see `_getFieldIndexBySpecial`).
    """
    return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.sequence)
  def getCategoryFieldIdx(self):
    """
    :returns: (int) index of the ``category`` field; ``None`` if no such
        field (see `_getFieldIndexBySpecial`).
    """
    return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.category)
  def getLearningFieldIdx(self):
    """
    :returns: (int) index of the ``learning`` field; ``None`` if no such
        field (see `_getFieldIndexBySpecial`).
    """
    return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.learning)
  @abstractmethod
  def setTimeout(self, timeout):
    """
    Set the read timeout in seconds.

    :param timeout: (int or floating point)
    """
  @abstractmethod
  def flush(self):
    """Flush the underlying storage (e.g. a file) to disk."""
| 14,841 | Python | .py | 372 | 34.416667 | 80 | 0.687304 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,873 | category_filter.py | numenta_nupic-legacy/src/nupic/data/category_filter.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
'''
A category filter can be applied to any categorical field
The basic operation is assumed to be: OR
In the final version users may input Boolean algebra to define this
behaviour
If your field is 'animals'
and your values are
1 - dogs
2 - cat
3 - mouse
4 - giraffe
5 - hippo
A category filter for dog,giraffe
would return records 1 and 4
Note that we're using a substring search so that dogs ~= dog
We can't know all the categories before hand so we present to the user a
freeform input box.
'''
class CategoryFilter(object):
  """Substring-based OR filter over categorical record fields.

  Each entry of ``filterDict`` maps a field name to a spec dict holding the
  field's positional ``index`` in a record and the list of ``categories``
  (substrings) to look for. A record matches as soon as any configured
  substring occurs in the corresponding field value.
  """

  def __init__(self, filterDict):
    """
    TODO describe filterDict schema

    :param filterDict: mapping of field name -> {'index': int,
        'categories': list of substrings}
    """
    self.filterDict = filterDict

  def match(self, record):
    """Return True if the record matches any of the provided filters."""
    for fieldName, spec in self.filterDict.items():
      fieldIdx = spec['index']
      for needle in spec['categories']:
        # Record might be blank; a blank record can never match.
        if not record:
          continue
        # Substring containment, so e.g. 'dogs' matches the filter 'dog'.
        if needle in record[fieldIdx]:
          return True
    # None of the categories were found in this record
    return False
| 2,253 | Python | .py | 63 | 32.206349 | 72 | 0.67954 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,874 | file_record_stream.py | numenta_nupic-legacy/src/nupic/data/file_record_stream.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
CSV file based implementation of a record stream
:class:`~.file_record_stream.FileRecordStream` is a class that can read and
write ``.csv`` files that contain records. The file has 3 header lines that
contain, for each field, the name (line 1), type (line 2), and a special
indicator (line 3). The special indicator can indicate that the field specifies
a reset, is a sequence ID, or is a timestamp for the record.
You can see an example of a NuPIC data file formatted for
:class:`~.file_record_stream.FileRecordStream`
`in the quick start <../../quick-start/example-data.html>`_.
The header lines look like:
::
f1,f2,f3,....fN
int,string,datetime,bool,...
R,S,T,,,,....
The data lines are just comma separated values that match the types in the
second header line. The supported types are: int, float, string, bool, datetime
The format for datetime fields is ``yyyy-mm-dd hh:mm:ss.us``
The 'us' component is microseconds.
When reading a file the FileRecordStream will automatically read the header line
and will figure out the type of each field and what are the timestamp, reset
and sequenceId fields (if any).
:class:`~.file_record_stream.FileRecordStream` supports the context manager
(``with`` statement) protocol. That means you can do:
.. code-block:: python
with FileRecordStream(filename) as f:
...
...
When the control exits the ``with`` block the file will be closed automatically.
You may still call the :meth:`~.file_record_stream.FileRecordStream.close`
method at any point (even multiple times).
:class:`~.file_record_stream.FileRecordStream` also supports the iteration
protocol so you may read its contents using a for loop:
.. code-block:: python
for r in f:
print r
"""
import os
import csv
import copy
import json
from nupic.data.field_meta import FieldMetaInfo, FieldMetaType, FieldMetaSpecial
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.record_stream import RecordStreamIface
from nupic.data.utils import (intOrNone, floatOrNone, parseBool, parseTimestamp,
serializeTimestamp, serializeTimestampNoMS, escape, unescape, parseSdr,
serializeSdr, parseStringList, stripList)
class FileRecordStream(RecordStreamIface):
  """
  CSV file based RecordStream implementation

  Each field is a 3-tuple (``name``, ``type``, ``special`` or
  :class:`~.fieldmeta.FieldMetaSpecial`.none)

  The name is the name of the field. The type is one of the constants in
  :class:`~.fieldmeta.FieldMetaType`. The special is one of the
  :class:`~.fieldmeta.FieldMetaSpecial` values that designate their field as the
  sequenceId, reset, timestamp, or category. With exception of multiple
  categories, there can be at most one of each. There may be multiple fields of
  type datetime, but no more than one of them may be the timestamp field
  (:class:`~.fieldmeta.FieldMetaSpecial`.timestamp). The sequence id
  field must be either a string or an int. The reset field must be an int (and
  must contain 0 or 1).

  The category field must be an int or space-separated list of ints, where
  the former represents single-label classification and the latter is for
  multi-label classification (e.g. "1 3 4" designates a record for labels 1,
  3, and 4). The number of categories is allowed to vary record to record;
  sensor regions represent non-categories with -1, thus the category values
  must be >= 0.

  The FileRecordStream iterates over the field names, types and specials and
  stores the information.

  :param streamID:
      CSV file name, input or output
  :param write:
      True or False, open for writing if True
  :param fields:
      a list of nupic.data.fieldmeta.FieldMetaInfo field descriptors, only
      applicable when write==True
  :param missingValues:
      what missing values should be replaced with?
  :param bookmark:
      a reference to the previous reader, if passed in, the records will be
      returned starting from the point where bookmark was requested. Either
      bookmark or firstRecord can be specified, not both. If bookmark is used,
      then firstRecord MUST be None.
  :param includeMS:
      If false, the microseconds portion is not included in the
      generated output file timestamp fields. This makes it compatible
      with reading in from Excel.
  :param firstRecord:
      0-based index of the first record to start reading from. Either bookmark
      or firstRecord can be specified, not both. If bookmark is used, then
      firstRecord MUST be None.
  """

  # Private: number of header rows (field names, types, special)
  _NUM_HEADER_ROWS = 3

  # Private: file mode for opening file for writing
  _FILE_WRITE_MODE = 'w'

  # Private: file mode for opening file for reading
  _FILE_READ_MODE = 'r'


  def __init__(self, streamID, write=False, fields=None, missingValues=None,
               bookmark=None, includeMS=True, firstRecord=None):
    super(FileRecordStream, self).__init__()

    # Only bookmark or firstRow can be specified, not both
    if bookmark is not None and firstRecord is not None:
      raise RuntimeError(
          "Only bookmark or firstRecord can be specified, not both")

    if fields is None:
      fields = []
    if missingValues is None:
      missingValues = ['']

    # We'll be operating on csvs with arbitrarily long fields
    size = 2**27
    csv.field_size_limit(size)

    self._filename = streamID
    # We can't guarantee what system files are coming from, use universal
    # newlines
    self._write = write
    self._mode = self._FILE_WRITE_MODE if write else self._FILE_READ_MODE
    self._file = open(self._filename, self._mode)
    self._sequences = set()
    # When True, getNextRecord() rewinds and keeps reading at EOF
    # (see setAutoRewind()).
    self.rewindAtEOF = False

    if write:
      assert fields is not None
      assert isinstance(fields, (tuple, list))
      # Verify all fields are 3-tuple
      assert all(isinstance(f, (tuple, FieldMetaInfo)) and len(f) == 3
                 for f in fields)
      names, types, specials = zip(*fields)
      self._writer = csv.writer(self._file)
    else:
      # Read the three header lines (names, types, specials).
      self._reader = csv.reader(self._file, dialect="excel")
      try:
        names = [n.strip() for n in self._reader.next()]
      except:
        # NOTE(review): bare except; a NULL byte is the historically observed
        # cause, but any csv error is reported with this message.
        raise Exception('The header line of the file %s contained a NULL byte' \
                        % self._filename)
      types = [t.strip() for t in self._reader.next()]
      specials = [s.strip() for s in self._reader.next()]

      # If there are no specials, this means there was a blank line
      if len(specials) == 0:
        specials=[""]

    if not len(names) == len(types) == len(specials):
      raise Exception('Invalid file format: different number of fields '
                      'in the header rows of file %s (%d, %d, %d)' %
                      (streamID, len(names), len(types), len(specials)))

    # Verify standard file format
    for t in types:
      if not FieldMetaType.isValid(t):
        raise Exception('Invalid file format for "%s" - field type "%s" '
                        'not a valid FieldMetaType' % (self._filename, t,))

    for s in specials:
      if not FieldMetaSpecial.isValid(s):
        raise Exception('Invalid file format. \'%s\' is not a valid special '
                        'flag' % s)

    self._fields = [FieldMetaInfo(*attrs)
                    for attrs in zip(names, types, specials)]
    self._fieldCount = len(self._fields)

    # Keep track on how many records have been read/written
    self._recordCount = 0

    # Cache the positional index of each special field (None when absent).
    self._timeStampIdx = (specials.index(FieldMetaSpecial.timestamp)
                          if FieldMetaSpecial.timestamp in specials else None)
    self._resetIdx = (specials.index(FieldMetaSpecial.reset)
                      if FieldMetaSpecial.reset in specials else None)
    self._sequenceIdIdx = (specials.index(FieldMetaSpecial.sequence)
                           if FieldMetaSpecial.sequence in specials else None)
    self._categoryIdx = (specials.index(FieldMetaSpecial.category)
                         if FieldMetaSpecial.category in specials else None)
    self._learningIdx = (specials.index(FieldMetaSpecial.learning)
                         if FieldMetaSpecial.learning in specials else None)

    # keep track of the current sequence
    self._currSequence = None
    self._currTime = None

    # NOTE(review): these truthiness checks skip validation when the special
    # field happens to be at index 0 — pre-existing behavior, kept as-is.
    if self._timeStampIdx:
      assert types[self._timeStampIdx] == FieldMetaType.datetime
    if self._sequenceIdIdx:
      assert types[self._sequenceIdIdx] in (FieldMetaType.string,
                                            FieldMetaType.integer)
    if self._resetIdx:
      assert types[self._resetIdx] == FieldMetaType.integer
    if self._categoryIdx:
      assert types[self._categoryIdx] in (FieldMetaType.list,
                                          FieldMetaType.integer)
    if self._learningIdx:
      assert types[self._learningIdx] == FieldMetaType.integer

    # Convert the types to the actual types in order to convert the strings.
    # Reading maps text -> Python objects; writing maps Python objects -> text.
    if self._mode == self._FILE_READ_MODE:
      m = {FieldMetaType.integer: intOrNone,
           FieldMetaType.float: floatOrNone,
           FieldMetaType.boolean: parseBool,
           FieldMetaType.string: unescape,
           FieldMetaType.datetime: parseTimestamp,
           FieldMetaType.sdr: parseSdr,
           FieldMetaType.list: parseStringList}
    else:
      if includeMS:
        datetimeFunc = serializeTimestamp
      else:
        datetimeFunc = serializeTimestampNoMS
      m = {FieldMetaType.integer: str,
           FieldMetaType.float: str,
           FieldMetaType.string: escape,
           FieldMetaType.boolean: str,
           FieldMetaType.datetime: datetimeFunc,
           FieldMetaType.sdr: serializeSdr,
           FieldMetaType.list: stripList}

    self._adapters = [m[t] for t in types]

    self._missingValues = missingValues

    #
    # If the bookmark is set, we need to skip over first N records
    #
    if bookmark is not None:
      rowsToSkip = self._getStartRow(bookmark)
    elif firstRecord is not None:
      rowsToSkip = firstRecord
    else:
      rowsToSkip = 0

    while rowsToSkip > 0:
      self.next()
      rowsToSkip -= 1

    # Dictionary to store record statistics (min and max of scalars for now)
    self._stats = None


  def __getstate__(self):
    # Open file handles and csv readers are not picklable; drop them and
    # re-open on unpickle (__setstate__).
    d = dict()
    d.update(self.__dict__)
    del d['_reader']
    del d['_file']
    return d


  def __setstate__(self, state):
    self.__dict__ = state
    self._file = None
    self._reader = None
    self.rewind()


  def close(self):
    """
    Closes the stream. Safe to call multiple times.
    """
    if self._file is not None:
      self._file.close()
      self._file = None


  def rewind(self):
    """
    Put us back at the beginning of the file again.
    """
    # Superclass rewind
    super(FileRecordStream, self).rewind()

    self.close()
    self._file = open(self._filename, self._mode)
    self._reader = csv.reader(self._file, dialect="excel")

    # Skip header rows
    self._reader.next()
    self._reader.next()
    self._reader.next()

    # Reset record count, etc.
    self._recordCount = 0


  def getNextRecord(self, useCache=True):
    """ Returns next available data record from the file.

    :returns: a data row (a list or tuple) if available; None, if no more
              records in the table (End of Stream - EOS); empty sequence (list
              or tuple) when timing out while waiting for the next record.
    """
    assert self._file is not None
    assert self._mode == self._FILE_READ_MODE

    # Read the line
    try:
      line = self._reader.next()

    except StopIteration:
      if self.rewindAtEOF:
        if self._recordCount == 0:
          raise Exception("The source configured to reset at EOF but "
                          "'%s' appears to be empty" % self._filename)
        self.rewind()
        line = self._reader.next()

      else:
        return None

    # Keep score of how many records were read
    self._recordCount += 1

    # Split the line to text fields and convert each text field to a Python
    # object if value is missing (empty string) encode appropriately for
    # upstream consumers in the case of numeric types, this means replacing
    # missing data with a sentinel value for string type, we can leave the empty
    # string in place
    record = []
    for i, f in enumerate(line):
      #print "DEBUG: Evaluating field @ index %s: %r" % (i, f)
      #sys.stdout.flush()
      if f in self._missingValues:
        record.append(SENTINEL_VALUE_FOR_MISSING_DATA)
      else:
        # either there is valid data, or the field is string type,
        # in which case the adapter does the right thing by default
        record.append(self._adapters[i](f))

    return record


  def appendRecord(self, record):
    """
    Saves the record in the underlying csv file.

    :param record: a list of Python objects that will be string-ified
    """
    assert self._file is not None
    assert self._mode == self._FILE_WRITE_MODE
    assert isinstance(record, (list, tuple)), \
      "unexpected record type: " + repr(type(record))

    assert len(record) == self._fieldCount, \
      "len(record): %s, fieldCount: %s" % (len(record), self._fieldCount)

    # Write header if needed; this is deferred to the first append so an
    # empty stream never writes header rows.
    if self._recordCount == 0:
      # Write the header
      names, types, specials = zip(*self.getFields())
      for line in names, types, specials:
        self._writer.writerow(line)

    # Keep track of sequences, make sure time flows forward
    self._updateSequenceInfo(record)

    line = [self._adapters[i](f) for i, f in enumerate(record)]

    self._writer.writerow(line)
    self._recordCount += 1


  def appendRecords(self, records, progressCB=None):
    """
    Saves multiple records in the underlying storage.

    :param records: array of records as in
                    :meth:`~.FileRecordStream.appendRecord`
    :param progressCB: (function) callback to report progress
    """
    for record in records:
      self.appendRecord(record)
      if progressCB is not None:
        progressCB()


  def getBookmark(self):
    """
    Gets a bookmark or anchor to the current position.

    :returns: an anchor to the current position in the data. Passing this
              anchor to a constructor makes the current position to be the
              first returned record. Encoded as a JSON dict of the file's
              realpath plus the current row. ``None`` for a fresh write
              stream.
    """
    if self._write and self._recordCount==0:
      return None

    rowDict = dict(filepath=os.path.realpath(self._filename),
                   currentRow=self._recordCount)
    return json.dumps(rowDict)


  def recordsExistAfter(self, bookmark):
    """
    Returns whether there are more records from current position. ``bookmark``
    is not used in this implementation.

    :return: True if there are records left after current position.
    """
    return (self.getDataRowCount() - self.getNextRecordIdx()) > 0


  def seekFromEnd(self, numRecords):
    """
    Seeks to ``numRecords`` from the end and returns a bookmark to the new
    position.

    :param numRecords: how far to seek from end of file.
    :return: bookmark to desired location.
    """
    # NOTE(review): file.seek() takes a *byte* offset but this passes a line
    # count — looks wrong; confirm intended behavior before relying on it.
    self._file.seek(self._getTotalLineCount() - numRecords)
    return self.getBookmark()


  def setAutoRewind(self, autoRewind):
    """
    Controls whether :meth:`~.FileRecordStream.getNextRecord` should
    automatically rewind the source when EOF is reached.

    :param autoRewind: (bool)

        - if True, :meth:`~.FileRecordStream.getNextRecord` will automatically
          rewind the source on EOF.
        - if False, :meth:`~.FileRecordStream.getNextRecord` will not
          automatically rewind the source on EOF.
    """
    self.rewindAtEOF = autoRewind


  def getStats(self):
    """
    Parse the file using dedicated reader and collect fields stats. Never
    called if user of :class:`~.FileRecordStream` does not invoke
    :meth:`~.FileRecordStream.getStats` method.

    :returns:
          a dictionary of stats. In the current implementation, min and max
          fields are supported. Example of the return dictionary is:

          .. code-block:: python

             {
               'min' : [f1_min, f2_min, None, None, fn_min],
               'max' : [f1_max, f2_max, None, None, fn_max]
             }

          (where fx_min/fx_max are set for scalar fields, or None if not)
    """
    # Collect stats only once per File object, use fresh csv iterator
    # to keep the next() method returning sequential records no matter when
    # caller asks for stats
    # NOTE(review): `== None` comparisons and the never-closed `inFile` handle
    # below are pre-existing; kept unchanged in this documentation pass.
    if self._stats == None:
      # Stats are only available when reading csv file
      assert self._mode == self._FILE_READ_MODE

      inFile = open(self._filename, self._FILE_READ_MODE)

      # Create a new reader; read names, types, specials
      reader = csv.reader(inFile, dialect="excel")
      names = [n.strip() for n in reader.next()]
      types = [t.strip() for t in reader.next()]
      # Skip over specials
      reader.next()

      # Initialize stats to all None
      self._stats = dict()
      self._stats['min'] = []
      self._stats['max'] = []
      for i in xrange(len(names)):
        self._stats['min'].append(None)
        self._stats['max'].append(None)

      # Read the file, collect stats. Only int/float fields with non-missing
      # values participate; other fields keep their None min/max.
      while True:
        try:
          line = reader.next()
          for i, f in enumerate(line):
            if (len(types) > i and
                types[i] in [FieldMetaType.integer, FieldMetaType.float] and
                f not in self._missingValues):
              value = self._adapters[i](f)
              if self._stats['max'][i] == None or \
                 self._stats['max'][i] < value:
                self._stats['max'][i] = value
              if self._stats['min'][i] == None or \
                 self._stats['min'][i] > value:
                self._stats['min'][i] = value
        except StopIteration:
          break

    return self._stats


  def clearStats(self):
    """ Resets stats collected so far.
    """
    self._stats = None


  def getError(self):
    """
    Not implemented. CSV file version does not provide storage for the error
    information
    """
    return None


  def setError(self, error):
    """
    Not implemented. CSV file version does not provide storage for the error
    information
    """
    return


  def isCompleted(self):
    """ Not implemented. CSV file is always considered completed."""
    return True


  def setCompleted(self, completed=True):
    """ Not implemented: CSV file is always considered completed, nothing to do.
    """
    return


  def getFieldNames(self):
    """
    :returns: (list) field names associated with the data.
    """
    return [f.name for f in self._fields]


  def getFields(self):
    """
    :returns: a sequence of :class:`~.FieldMetaInfo`
              ``name``/``type``/``special`` tuples for each field in the
              stream. A shallow copy, so callers cannot mutate our list.
    """
    if self._fields is None:
      return None
    else:
      return copy.copy(self._fields)


  def _updateSequenceInfo(self, r):
    """Keep track of sequence and make sure time goes forward

    Check if the current record is the beginning of a new sequence
    A new sequence starts in 2 cases:

    1. The sequence id changed (if there is a sequence id field)
    2. The reset field is 1 (if there is a reset field)

    Note that if there is no sequenceId field or resetId field then the entire
    dataset is technically one big sequence. The function will not return True
    for the first record in this case. This is Ok because it is important to
    detect new sequences only when there are multiple sequences in the file.

    :raises Exception: when a previously-finished sequence id reappears, or
        when a timestamp moves backwards within a sequence.
    """
    # Get current sequence id (if any)
    newSequence = False
    sequenceId = (r[self._sequenceIdIdx]
                  if self._sequenceIdIdx is not None else None)
    if sequenceId != self._currSequence:
      # verify that the new sequence didn't show up before
      if sequenceId in self._sequences:
        raise Exception('Broken sequence: %s, record: %s' % \
                        (sequenceId, r))

      # add the finished sequence to the set of sequence
      self._sequences.add(self._currSequence)
      self._currSequence = sequenceId

      # Verify that the reset is consistent (if there is one)
      if self._resetIdx:
        assert r[self._resetIdx] == 1
      newSequence = True

    else:
      # Check the reset
      reset = False
      if self._resetIdx:
        reset = r[self._resetIdx]
        if reset == 1:
          newSequence = True

    # If it's still the same old sequence make sure the time flows forward
    if not newSequence:
      if self._timeStampIdx and self._currTime is not None:
        t = r[self._timeStampIdx]
        if t < self._currTime:
          raise Exception('No time travel. Early timestamp for record: %s' % r)

    if self._timeStampIdx:
      self._currTime = r[self._timeStampIdx]


  def _getStartRow(self, bookmark):
    """ Extracts start row from the bookmark information; falls back to row 0
    when the bookmark was taken on a different file (by realpath).
    """
    bookMarkDict = json.loads(bookmark)

    realpath = os.path.realpath(self._filename)

    bookMarkFile = bookMarkDict.get('filepath', None)

    if bookMarkFile != realpath:
      print ("Ignoring bookmark due to mismatch between File's "
             "filename realpath vs. bookmark; realpath: %r; bookmark: %r") % (
               realpath, bookMarkDict)
      return 0
    else:
      return bookMarkDict['currentRow']


  def _getTotalLineCount(self):
    """ Returns: count of ALL lines in dataset, including header lines
    """
    # Flush the file before we open it again to count lines
    if self._mode == self._FILE_WRITE_MODE:
      self._file.flush()
    return sum(1 for line in open(self._filename, self._FILE_READ_MODE))


  def getNextRecordIdx(self):
    """
    :returns: (int) the index of the record that will be read next from
              :meth:`~.FileRecordStream.getNextRecord`.
    """
    return self._recordCount


  def getDataRowCount(self):
    """
    :returns: (int) count of data rows in dataset (excluding header lines)
    """
    numLines = self._getTotalLineCount()

    if numLines == 0:
      # this may be the case in a file opened for write before the
      # header rows are written out
      assert self._mode == self._FILE_WRITE_MODE and self._recordCount == 0
      numDataRows = 0
    else:
      numDataRows = numLines - self._NUM_HEADER_ROWS

    assert numDataRows >= 0

    return numDataRows


  def setTimeout(self, timeout):
    # No-op: a local file read never blocks waiting for new data.
    pass


  def flush(self):
    """
    Flushes the file.
    """
    if self._file is not None:
      self._file.flush()


  def __enter__(self):
    """Context guard - enter

    Just return the object
    """
    return self


  def __exit__(self, yupe, value, traceback):
    """Context guard - exit

    Ensures that the file is always closed at the end of the 'with' block.
    Lets exceptions propagate.
    """
    self.close()


  def __iter__(self):
    """Support for the iterator protocol. Return itself"""
    return self


  def next(self):
    """Implement the iterator protocol """
    record = self.getNextRecord()
    if record is None:
      raise StopIteration

    return record
| 24,097 | Python | .py | 584 | 34.669521 | 85 | 0.667609 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,875 | stats_v2.py | numenta_nupic-legacy/src/nupic/data/stats_v2.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import itertools
import pprint
import operator
from collections import defaultdict
from pkg_resources import resource_filename
import numpy
from nupic.data.file_record_stream import FileRecordStream
from nupic.encoders import date as DateEncoder
VERBOSITY = 0
"""
We collect stats for each column in the datafile.
Supported stats fields are
int
float
string
datetime
bool
class ModelStatsCollector(object):
def __init__(self, fieldname):
pass
def addValue(self, value):
pass
def getStats(self,):
pass
"""
class BaseStatsCollector(object):
  """Accumulates raw values for one field and reports generic counts.

  Subclasses extend getStats() with type-specific statistics.
  """

  def __init__(self, fieldname, fieldtype, fieldspecial):
    self.fieldname = fieldname
    self.fieldtype = fieldtype
    self.fieldspecial = fieldspecial

    # Raw values are retained for subclasses; drop the list if it ever
    # becomes a speed or memory bottleneck.
    self.valueList = []
    self.valueSet = set()

  def addValue(self, value):
    """Record one observed value for this field."""
    self.valueList.append(value)
    self.valueSet.add(value)

  def getStats(self, stats):
    """Write the generic per-field counts into ``stats[self.fieldname]``."""
    fieldStats = dict()
    fieldStats['name'] = self.fieldname
    fieldStats['type'] = self.fieldtype
    fieldStats['special'] = self.fieldspecial

    # Counts that apply to every field type
    numEntries = len(self.valueList)
    numDistinct = len(self.valueSet)
    fieldStats['totalNumEntries'] = numEntries
    fieldStats['totalNumDistinctEntries'] = numDistinct
    stats[self.fieldname] = fieldStats

    if VERBOSITY > 1:
      print("-"*40)
      print("Field '%s'" % self.fieldname)
      print("--")
      print("Counts:")
      print("Total number of entries:%d" % numEntries)
      print("Total number of distinct entries:%d" % numDistinct)
class StringStatsCollector(BaseStatsCollector):
  """Stats collector for 'string' fields.

  Adds a verbose-only frequency report of the most common strings on top of
  the generic counts from BaseStatsCollector.
  """

  def getStats(self, stats):
    BaseStatsCollector.getStats(self, stats)

    if VERBOSITY > 2:
      # Tally occurrences per distinct string
      counts = defaultdict(int)
      for v in self.valueList:
        counts[v] += 1

      print("--")
      # Print the top 5 frequent strings
      topN = 5
      print(" Sorted list:")
      ranked = sorted(counts.items(),
                      key=operator.itemgetter(1),
                      reverse=True)
      for key, count in ranked[:topN]:
        print("%s:%d" % (key, count))
      if len(counts) > topN:
        print("...")
class NumberStatsCollector(BaseStatsCollector):
  """Stats collector for scalar (int/float) fields.

  Adds min/max, mean, median, 1st/99th percentiles and a mean resolution
  (average gap between consecutive distinct values) to the generic counts.
  """

  def getStats(self, stats):
    """ Override of getStats() in BaseStatsCollector

    stats: A dictionary where all the stats are
           outputted
    """
    BaseStatsCollector.getStats(self, stats)

    sortedValues = sorted(self.valueList)
    listLength = len(sortedValues)

    # Renamed from min/max to avoid shadowing the builtins.
    minVal = sortedValues[0]
    maxVal = sortedValues[-1]
    mean = numpy.mean(self.valueList)
    median = sortedValues[int(0.5*listLength)]
    percentile1st = sortedValues[int(0.01*listLength)]
    percentile99th = sortedValues[int(0.99*listLength)]

    # Mean gap between consecutive *distinct* values. The distinct values
    # must be sorted first: iterating a raw set yields an arbitrary order,
    # which made the old telescoping mean depend on set iteration order
    # rather than on the actual value spacing.
    distinctValues = sorted(self.valueSet)
    differenceList = [(cur - prev)
                      for prev, cur in zip(distinctValues[:-1],
                                           distinctValues[1:])]
    meanResolution = numpy.mean(differenceList)

    fieldStats = stats[self.fieldname]
    fieldStats['min'] = minVal
    fieldStats['max'] = maxVal
    fieldStats['mean'] = mean
    fieldStats['median'] = median
    fieldStats['percentile1st'] = percentile1st
    fieldStats['percentile99th'] = percentile99th
    fieldStats['meanResolution'] = meanResolution

    # TODO: Right now, always pass the data along.
    # This is used for data-dependent encoders.
    passData = True
    if passData:
      fieldStats['data'] = self.valueList

    if VERBOSITY > 2:
      print('--')
      print("Statistics:")
      print("min: %s" % minVal)
      print("max: %s" % maxVal)
      print("mean: %s" % mean)
      print("median: %s" % median)
      print("1st percentile : %s" % percentile1st)
      print("99th percentile: %s" % percentile99th)
      print('--')
      print("Resolution:")
      print("Mean Resolution: %s" % meanResolution)

    if VERBOSITY > 3:
      print('--')
      print("Histogram:")
      # The obsolete new=True keyword was removed from numpy.histogram long
      # ago (numpy >= 1.6 always uses the new semantics); passing it raises
      # TypeError on any modern numpy.
      counts, bins = numpy.histogram(self.valueList)
      print("Counts: %s" % counts.tolist())
      print("Bins: %s" % bins.tolist())
class IntStatsCollector(NumberStatsCollector):
  """Stats collector for 'int' fields; inherits all scalar-stats logic."""
  pass
class FloatStatsCollector(NumberStatsCollector):
  """Stats collector for 'float' fields; inherits all scalar-stats logic."""
  pass
class BoolStatsCollector(BaseStatsCollector):
  """Stats collector for 'bool' fields; only the generic counts apply."""
  pass
class DateTimeStatsCollector(BaseStatsCollector):
  """Stats collector for 'datetime' fields.

  For each datetime sub-encoding (season, dayOfWeek, timeOfDay, weekend,
  holiday) it records a boolean in the stats dict telling whether the data
  shows any variation in that sub-encoding.
  """

  def getStats(self, stats):
    BaseStatsCollector.getStats(self, stats)
    # We include subencoders for datetime field if there is a variation in
    # encodings for that particular subencoding.
    # gym_melbourne_wed_train.csv has data only on the wednesdays, it doesn't
    # make sense to include dayOfWeek in the permutations because it is
    # constant in the entire dataset.
    # We check for variation in sub-encodings by passing the timestamp field
    # through the maximal sub-encoder and checking for variation in
    # post-encoding values.

    # Setup a datetime encoder with maximal resolution for each subencoder
    encoder = DateEncoder.DateEncoder(season=(1,1), # width=366, resolution=1day
                                      dayOfWeek=(1,1), # width=7, resolution=1day
                                      timeOfDay=(1,1.0/60), # width=1440, resolution=1min
                                      weekend=1, # width=2, binary encoding
                                      holiday=1, # width=2, binary encoding
                                      )

    # Collect all encoder outputs; logical OR accumulates every bit that was
    # ever set by any record's encoding.
    totalOrEncoderOutput = numpy.zeros(encoder.getWidth(), dtype=numpy.uint8)
    for value in self.valueList:
      numpy.logical_or(totalOrEncoderOutput, encoder.encode(value),
                       totalOrEncoderOutput)

    encoderDescription = encoder.getDescription()
    numSubEncoders = len(encoderDescription)
    for i in range(numSubEncoders):
      subEncoderName,_ = encoderDescription[i]
      beginIdx = encoderDescription[i][1]
      if i == (numSubEncoders - 1):
        endIdx = encoder.getWidth()
      else:
        endIdx = encoderDescription[i+1][1]
      # More than one distinct bit set within the sub-encoder's slice of the
      # OR-accumulated output means the data varies in that sub-encoding.
      stats[self.fieldname][subEncoderName] = \
          (totalOrEncoderOutput[beginIdx:endIdx].sum()>1)

    # NOTE(review): decodedInput is computed but never used — pre-existing.
    decodedInput = encoder.decode(totalOrEncoderOutput)[0]
    if VERBOSITY > 2:
      print "--"
      print "Sub-encoders:"
      for subEncoderName,_ in encoderDescription:
        print "%s:%s" % (subEncoderName, stats[self.fieldname][subEncoderName])
def generateStats(filename, maxSamples = None,):
"""
Collect statistics for each of the fields in the user input data file and
return a stats dict object.
Parameters:
------------------------------------------------------------------------------
filename: The path and name of the data file.
maxSamples: Upper bound on the number of rows to be processed
retval: A dictionary of dictionaries. The top level keys are the
field names and the corresponding values are the statistics
collected for the individual file.
Example:
{
'consumption':{'min':0,'max':90,'mean':50,...},
'gym':{'numDistinctCategories':10,...},
...
}
"""
# Mapping from field type to stats collector object
statsCollectorMapping = {'float': FloatStatsCollector,
'int': IntStatsCollector,
'string': StringStatsCollector,
'datetime': DateTimeStatsCollector,
'bool': BoolStatsCollector,
}
filename = resource_filename("nupic.datafiles", filename)
print "*"*40
print "Collecting statistics for file:'%s'" % (filename,)
dataFile = FileRecordStream(filename)
# Initialize collector objects
# statsCollectors list holds statsCollector objects for each field
statsCollectors = []
for fieldName, fieldType, fieldSpecial in dataFile.getFields():
# Find the corresponding stats collector for each field based on field type
# and intialize an instance
statsCollector = \
statsCollectorMapping[fieldType](fieldName, fieldType, fieldSpecial)
statsCollectors.append(statsCollector)
# Now collect the stats
if maxSamples is None:
maxSamples = 500000
for i in xrange(maxSamples):
record = dataFile.getNextRecord()
if record is None:
break
for i, value in enumerate(record):
statsCollectors[i].addValue(value)
# stats dict holds the statistics for each field
stats = {}
for statsCollector in statsCollectors:
statsCollector.getStats(stats)
# We don't want to include reset field in permutations
# TODO: handle reset field in a clean way
if dataFile.getResetFieldIdx() is not None:
resetFieldName,_,_ = dataFile.getFields()[dataFile.reset]
stats.pop(resetFieldName)
if VERBOSITY > 0:
pprint.pprint(stats)
return stats
def testGym1():
  """Collect stats for the Wednesday-only Melbourne gym training dataset."""
  generateStats("extra/gym/gym_melbourne_wed_train.csv")
def testGym2():
  """Collect stats for the full Melbourne gym training dataset."""
  generateStats("extra/gym/gym_melbourne_train.csv")
def testGym3():
  """Collect stats for the general gym training dataset."""
  generateStats("extra/gym/gym_train.csv")
def testIris():
  """Collect stats for the iris training dataset."""
  generateStats("extra/iris/iris_train.csv")
def testMovieLens():
  """Collect stats for the MovieLens-100k movie training dataset."""
  generateStats("extra/movielens100k/movie_train.csv")
if __name__=="__main__":
  # When run as a script, exercise stats generation against each bundled
  # sample dataset.
  testIris()
  testGym1()
  testGym2()
  testGym3()
  testMovieLens()
| 10,688 | Python | .py | 263 | 33.741445 | 89 | 0.661904 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,876 | stream_reader.py | numenta_nupic-legacy/src/nupic/data/stream_reader.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import logging
import tempfile
import pkg_resources
from nupic.data.aggregator import Aggregator
from nupic.data.field_meta import FieldMetaInfo, FieldMetaType, FieldMetaSpecial
from nupic.data.file_record_stream import FileRecordStream
from nupic.data import json_helpers
from nupic.data.record_stream import RecordStreamIface
from nupic.frameworks.opf import jsonschema
import nupic.support
# URL scheme prefix accepted for file-based stream sources (see _openStream).
FILE_PREF = 'file://'
# If timeout is not set in the configuration file, default is 6 hours
READ_TIMEOUT = 6*60*60
class StreamTimeoutException(Exception):
  """ Defines the exception thrown when the input stream times out receiving
  new records."""
  pass
class StreamReader(RecordStreamIface):
  """
  Implements a stream reader. This is a high level class that owns one or more
  underlying implementations of a
  :class:`~nupic.data.record_stream.RecordStreamIface`. Each
  :class:`~nupic.data.record_stream.RecordStreamIface` implements the raw
  reading of records from the record store (which could be a file, hbase table
  or something else).
  In the future, we will support joining of two or more
  :class:`~nupic.data.record_stream.RecordStreamIface`'s (which is why the
  ``streamDef`` accepts a list of 'stream' elements), but for now only 1 source
  is supported.
  The class also implements aggregation of the (in the future) joined records
  from the sources.
  This module parses the stream definition (as defined in
  ``/src/nupic/frameworks/opf/jsonschema/stream_def.json``), creates the
  :class:`~nupic.data.record_stream.RecordStreamIface` for each source
  ('stream' element) defined in the stream def, performs aggregation, and
  returns each record in the correct format according to the desired column
  names specified in the streamDef.
  This class implements the :class:`~nupic.data.record_stream.RecordStreamIface`
  interface and thus can be used in place of a raw record stream.
  This is an example streamDef:
  .. code-block:: python
     {
       'version': 1
       'info': 'test_hotgym',
       'streams': [
           {'columns': [u'*'],
            'info': u'hotGym.csv',
            'last_record': 4000,
            'source': u'file://extra/hotgym/hotgym.csv'}.
       ],
       'timeField': 'timestamp',
       'aggregation': {
         'hours': 1,
         'fields': [
             ('timestamp', 'first'),
             ('gym', 'first'),
             ('consumption', 'sum')
         ],
       }
     }
  :param streamDef: The stream definition, potentially containing multiple
       sources (not supported yet). See
       ``src//nupic/frameworks/opf/jsonschema/stream_def.json`` for the
       format of this dict
  :param bookmark: Bookmark to start reading from. This overrides the
       first_record field of the streamDef if provided.
  :param saveOutput: If true, save the output to a csv file in a temp
       directory. The path to the generated file can be found in the log
       output.
  :param isBlocking: should read operation block *forever* if the next row of
       data is not available, but the stream is not marked as 'completed'
       yet?
  :param maxTimeout: if isBlocking is False, max seconds to wait for more data
       before timing out; ignored when isBlocking is True.
  :param eofOnTimeout: If True and we get a read timeout (isBlocking must be
       False to get read timeouts), assume we've reached the end of the
       input and produce the last aggregated record, if one can be
       completed.
  """
  def __init__(self, streamDef, bookmark=None, saveOutput=False,
               isBlocking=True, maxTimeout=0, eofOnTimeout=False):
    # Call superclass constructor
    super(StreamReader, self).__init__()
    loggerPrefix = 'com.numenta.nupic.data.StreamReader'
    self._logger = logging.getLogger(loggerPrefix)
    # Validate the stream definition against the packaged JSON schema before
    # touching any of its fields.
    json_helpers.validate(streamDef,
                          schemaPath=pkg_resources.resource_filename(
                              jsonschema.__name__, "stream_def.json"))
    assert len(streamDef['streams']) == 1, "Only 1 source stream is supported"
    # Save constructor args
    sourceDict = streamDef['streams'][0]
    self._recordCount = 0
    self._eofOnTimeout = eofOnTimeout
    self._logger.debug('Reading stream with the def: %s', sourceDict)
    # Dictionary to store record statistics (min and max of scalars for now)
    self._stats = None
    # ---------------------------------------------------------------------
    # Get the stream definition params
    # Limiting window of the stream. It would not return any records until
    # 'first_record' ID is read (or very first with the ID above that). The
    # stream will return EOS once it reads record with ID 'last_record' or
    # above (NOTE: the name 'lastRecord' is misleading because it is NOT
    # inclusive).
    firstRecordIdx = sourceDict.get('first_record', None)
    self._sourceLastRecordIdx = sourceDict.get('last_record', None)
    # If a bookmark was given, then override first_record from the stream
    # definition.
    if bookmark is not None:
      firstRecordIdx = None
    # Column names must be provided in the streamdef json
    # Special case is ['*'], meaning all available names from the record stream
    self._streamFieldNames = sourceDict.get('columns', None)
    if self._streamFieldNames != None and self._streamFieldNames[0] == '*':
      self._needFieldsFiltering = False
    else:
      self._needFieldsFiltering = True
    # Types must be specified in streamdef json, or in case of the
    # file_recod_stream types could be implicit from the file
    streamFieldTypes = sourceDict.get('types', None)
    self._logger.debug('Types from the def: %s', streamFieldTypes)
    # Validate that all types are valid
    if streamFieldTypes is not None:
      for dataType in streamFieldTypes:
        assert FieldMetaType.isValid(dataType)
    # Reset, sequence and time fields might be provided by streamdef json
    streamResetFieldName = streamDef.get('resetField', None)
    streamTimeFieldName = streamDef.get('timeField', None)
    streamSequenceFieldName = streamDef.get('sequenceIdField', None)
    self._logger.debug('r, t, s fields: %s, %s, %s', streamResetFieldName,
                                                    streamTimeFieldName,
                                                    streamSequenceFieldName)
    # =======================================================================
    # Open up the underlying record store
    dataUrl = sourceDict.get('source', None)
    assert dataUrl is not None
    self._recordStore = self._openStream(dataUrl, isBlocking, maxTimeout,
                                         bookmark, firstRecordIdx)
    assert self._recordStore is not None
    # =======================================================================
    # Prepare the data structures we need for returning just the fields
    # the caller wants from each record
    recordStoreFields = self._recordStore.getFields()
    self._recordStoreFieldNames = self._recordStore.getFieldNames()
    if not self._needFieldsFiltering:
      self._streamFieldNames = self._recordStoreFieldNames
    # Build up the field definitions for each field. This is a list of tuples
    # of (name, type, special)
    self._streamFields = []
    for dstIdx, name in enumerate(self._streamFieldNames):
      if name not in self._recordStoreFieldNames:
        raise RuntimeError("The column '%s' from the stream definition "
          "is not present in the underlying stream which has the following "
          "columns: %s" % (name, self._recordStoreFieldNames))
      fieldIdx = self._recordStoreFieldNames.index(name)
      fieldType = recordStoreFields[fieldIdx].type
      fieldSpecial = recordStoreFields[fieldIdx].special
      # If the types or specials were defined in the stream definition,
      # then override what was found in the record store
      if streamFieldTypes is not None:
        fieldType = streamFieldTypes[dstIdx]
      if streamResetFieldName is not None and streamResetFieldName == name:
        fieldSpecial = FieldMetaSpecial.reset
      if streamTimeFieldName is not None and streamTimeFieldName == name:
        fieldSpecial = FieldMetaSpecial.timestamp
      if (streamSequenceFieldName is not None and
          streamSequenceFieldName == name):
        fieldSpecial = FieldMetaSpecial.sequence
      self._streamFields.append(FieldMetaInfo(name, fieldType, fieldSpecial))
    # ========================================================================
    # Create the aggregator which will handle aggregation of records before
    # returning them.
    self._aggregator = Aggregator(
            aggregationInfo=streamDef.get('aggregation', None),
            inputFields=recordStoreFields,
            timeFieldName=streamDef.get('timeField', None),
            sequenceIdFieldName=streamDef.get('sequenceIdField', None),
            resetFieldName=streamDef.get('resetField', None))
    # We rely on the aggregator to tell us the bookmark of the last raw input
    # that contributed to the aggregated record
    self._aggBookmark = None
    # Compute the aggregation period in terms of months and seconds
    if 'aggregation' in streamDef:
      self._aggMonthsAndSeconds = nupic.support.aggregationToMonthsSeconds(
                streamDef.get('aggregation'))
    else:
      self._aggMonthsAndSeconds = None
    # ========================================================================
    # Are we saving the generated output to a csv?
    if saveOutput:
      tmpDir = tempfile.mkdtemp()
      outFilename = os.path.join(tmpDir, "generated_output.csv")
      self._logger.info("StreamReader: Saving generated records to: '%s'" %
                        outFilename)
      self._writer = FileRecordStream(streamID=outFilename,
                                      write=True,
                                      fields=self._streamFields)
    else:
      self._writer = None
  @staticmethod
  def _openStream(dataUrl,
                  isBlocking,  # pylint: disable=W0613
                  maxTimeout,  # pylint: disable=W0613
                  bookmark,
                  firstRecordIdx):
    """Open the underlying file stream
    This only supports 'file://' prefixed paths.
    :returns: record stream instance
    :rtype: FileRecordStream
    """
    # Strip the 'file://' prefix and resolve relative paths against the
    # current working directory.
    filePath = dataUrl[len(FILE_PREF):]
    if not os.path.isabs(filePath):
      filePath = os.path.join(os.getcwd(), filePath)
    return FileRecordStream(streamID=filePath,
                            write=False,
                            bookmark=bookmark,
                            firstRecord=firstRecordIdx)
  def close(self):
    """ Close the stream
    """
    return self._recordStore.close()
  def getNextRecord(self):
    """ Returns combined data from all sources (values only).
    :returns: None on EOF; empty sequence on timeout.
    """
    # Keep reading from the raw input till we get enough for an aggregated
    # record
    while True:
      # Reached EOF due to lastRow constraint?
      if self._sourceLastRecordIdx is not None and \
          self._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx:
        preAggValues = None  # indicates EOF
        bookmark = self._recordStore.getBookmark()
      else:
        # Get the raw record and bookmark
        preAggValues = self._recordStore.getNextRecord()
        bookmark = self._recordStore.getBookmark()
      if preAggValues == ():  # means timeout error occurred
        if self._eofOnTimeout:
          preAggValues = None  # act as if we got EOF
        else:
          return preAggValues  # Timeout indicator
      self._logger.debug('Read source record #%d: %r',
                        self._recordStore.getNextRecordIdx()-1, preAggValues)
      # Perform aggregation
      (fieldValues, aggBookmark) = self._aggregator.next(preAggValues, bookmark)
      # Update the aggregated record bookmark if we got a real record back
      if fieldValues is not None:
        self._aggBookmark = aggBookmark
      # Reached EOF?
      if preAggValues is None and fieldValues is None:
        return None
      # Return it if we have a record
      if fieldValues is not None:
        break
    # Do we need to re-order the fields in the record?
    if self._needFieldsFiltering:
      values = []
      srcDict = dict(zip(self._recordStoreFieldNames, fieldValues))
      for name in self._streamFieldNames:
        values.append(srcDict[name])
      fieldValues = values
    # Write to debug output?
    if self._writer is not None:
      self._writer.appendRecord(fieldValues)
    self._recordCount += 1
    self._logger.debug('Returning aggregated record #%d from getNextRecord(): '
                      '%r. Bookmark: %r',
                      self._recordCount-1, fieldValues, self._aggBookmark)
    return fieldValues
  def getDataRowCount(self):
    """
    Iterates through stream to calculate total records after aggregation.
    This will alter the bookmark state.
    """
    inputRowCountAfterAggregation = 0
    while True:
      record = self.getNextRecord()
      if record is None:
        return inputRowCountAfterAggregation
      inputRowCountAfterAggregation += 1
      # Safety cap: give up after 10000 aggregated rows rather than loop
      # forever on a stream that never reports EOF.
      if inputRowCountAfterAggregation > 10000:
        raise RuntimeError('No end of datastream found.')
  def getNextRecordIdx(self):
    """
    :returns: the index of the record that will be read next from
              :meth:`getNextRecord`.
    """
    return self._recordCount
  def recordsExistAfter(self, bookmark):
    """
    :returns: True if there are records left after the bookmark.
    """
    return self._recordStore.recordsExistAfter(bookmark)
  def getAggregationMonthsAndSeconds(self):
    """ Returns the aggregation period of the record stream as a dict
    containing 'months' and 'seconds'. The months is always an integer and
    seconds is a floating point. Only one is allowed to be non-zero at a
    time.
    Will return the aggregation period from this call. This call is
    used by the :meth:`nupic.data.record_stream.RecordStream.getNextRecordDict`
    method to assign a record number to a record given its timestamp and the
    aggregation interval.
    :returns: aggregationPeriod (as a dict) where:
              - ``months``: number of months in aggregation period
              - ``seconds``: number of seconds in aggregation period
                (as a float)
    """
    return self._aggMonthsAndSeconds
  def appendRecord(self, record):
    """Not supported: StreamReader is read-only; raises RuntimeError."""
    raise RuntimeError("Not implemented in StreamReader")
  def appendRecords(self, records, progressCB=None):
    """Not supported: StreamReader is read-only; raises RuntimeError."""
    raise RuntimeError("Not implemented in StreamReader")
  def seekFromEnd(self, numRecords):
    """Not supported for a stream; raises RuntimeError."""
    raise RuntimeError("Not implemented in StreamReader")
  def getFieldNames(self):
    """
    Returns all fields in all inputs (list of plain names).
    .. note:: currently, only one input is supported
    """
    return [f.name for f in self._streamFields]
  def getFields(self):
    """
    :returns: a sequence of :class:`nupic.data.fieldmeta.FieldMetaInfo` for each
              field in the stream.
    """
    return self._streamFields
  def getBookmark(self):
    """
    :returns: a bookmark to the current position
    """
    # This is the bookmark of the last raw record that contributed to the
    # most recent aggregated record (maintained by getNextRecord).
    return self._aggBookmark
  def clearStats(self):
    """ Resets stats collected so far.
    """
    self._recordStore.clearStats()
  def getStats(self):
    """
    TODO: This method needs to be enhanced to get the stats on the *aggregated*
    records.
    :returns: stats (like min and max values of the fields).
    """
    # The record store returns a dict of stats, each value in this dict is
    # a list with one item per field of the record store
    #         {
    #           'min' : [f1_min, f2_min, f3_min],
    #           'max' : [f1_max, f2_max, f3_max]
    #         }
    recordStoreStats = self._recordStore.getStats()
    # We need to convert each item to represent the fields of the *stream*
    streamStats = dict()
    for (key, values) in recordStoreStats.items():
      fieldStats = dict(zip(self._recordStoreFieldNames, values))
      streamValues = []
      for name in self._streamFieldNames:
        streamValues.append(fieldStats[name])
      streamStats[key] = streamValues
    return streamStats
  def getError(self):
    """
    :returns: errors saved in the stream.
    """
    return self._recordStore.getError()
  def setError(self, error):
    """ Saves specified error in the stream.
    :param error: to save
    """
    self._recordStore.setError(error)
  def isCompleted(self):
    """
    :returns: True if all records have been read.
    """
    return self._recordStore.isCompleted()
  def setCompleted(self, completed=True):
    """
    Marks the stream completed (True or False)
    :param completed: (bool) is completed or not
    """
    # CSV file is always considered completed, nothing to do
    self._recordStore.setCompleted(completed)
  def setTimeout(self, timeout):
    """Set the read timeout.
    :param timeout: (float or int) timeout length
    """
    self._recordStore.setTimeout(timeout)
  def flush(self):
    """Not supported: StreamReader is read-only; raises RuntimeError."""
    raise RuntimeError("Not implemented in StreamReader")
| 18,330 | Python | .py | 407 | 38.02457 | 80 | 0.667304 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,877 | dict_utils.py | numenta_nupic-legacy/src/nupic/data/dict_utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
# TODO: Note the functions 'rUpdate' are duplicated in
# the swarming.hypersearch.utils.py module
class DictObj(dict):
  """Dictionary whose keys can also be read as attributes.

  Attribute access is read-only: ``d.foo`` is ``d['foo']`` and raises
  KeyError when the key is absent. ``__setstate__`` restores pickled state
  by re-inserting the items, and deepcopy probing is deliberately routed to
  the normal attribute machinery so ``copy.deepcopy`` works.
  """
  def __getattr__(self, attr):
    # Let deepcopy's '__deepcopy__' probe raise AttributeError (via the
    # default lookup) instead of KeyError from the dict lookup below.
    if attr == '__deepcopy__':
      return super(DictObj, self).__getattribute__("__deepcopy__")
    return self[attr]
  def __setstate__(self, pickledState):
    for key, value in pickledState.items():
      self[key] = value
def rUpdate(original, updates):
  """Merge ``updates`` into ``original`` in place, descending into sub-dicts.

  A value from ``updates`` overwrites the matching entry in ``original``
  unless both sides are dicts, in which case the merge recurses into them.
  An explicit work stack is used instead of recursion so deeply nested
  dictionaries cannot blow the call stack.
  """
  pending = [(original, updates)]
  while pending:
    target, source = pending.pop()
    for key, value in source.iteritems():
      mergeable = (key in target and
                   isinstance(target[key], dict) and
                   isinstance(value, dict))
      if mergeable:
        pending.append((target[key], value))
      else:
        target[key] = value
def rApply(d, f):
  """Apply ``f`` to every non-dict leaf value of the nested dict ``d``.

  ``f`` is invoked as ``f(value, keys)`` where ``keys`` is the tuple of keys
  leading from the root of ``d`` down to ``value``. Sub-dicts are scheduled
  on a worklist rather than visited recursively.
  """
  worklist = [(d, ())]
  while worklist:
    node, path = worklist.pop()
    for key, value in node.iteritems():
      childPath = path + (key,)
      if isinstance(value, dict):
        worklist.insert(0, (value, childPath))
      else:
        f(value, childPath)
def find(d, target):
  """Search the nested dict ``d`` for a key equal to ``target``.

  Returns the first value found under that key, or None when the key occurs
  nowhere. The worklist pops from the tail and enqueues sub-dicts at the
  head, matching the original traversal order exactly.
  """
  worklist = [d]
  while worklist:
    node = worklist.pop()
    for key, value in node.iteritems():
      if key == target:
        return value
      if isinstance(value, dict):
        worklist.insert(0, value)
  return None
def get(d, keys):
  """Return the value reached by following the key sequence ``keys`` down
  into the nested dict ``d`` (an empty sequence returns ``d`` itself)."""
  node = d
  for key in keys:
    node = node[key]
  return node
def set(d, keys, value):
  """Assign ``value`` at the position addressed by ``keys`` in the nested
  dict ``d``.

  NOTE: the name shadows the builtin ``set`` at module scope; it is kept
  unchanged for backward compatibility with existing callers.
  """
  node = d
  for key in keys[:-1]:
    node = node[key]
  node[keys[-1]] = value
def dictDiffAndReport(da, db):
  """ Compares two python dictionaries at the top level and report differences,
  if any, to stdout
  da:             first dictionary
  db:             second dictionary
  Returns:        The same value as returned by dictDiff() for the given args
  """
  differences = dictDiff(da, db)
  # No differences: dictDiff returned None; print nothing.
  if not differences:
    return differences
  if differences['inAButNotInB']:
    print ">>> inAButNotInB: %s" % differences['inAButNotInB']
  if differences['inBButNotInA']:
    print ">>> inBButNotInA: %s" % differences['inBButNotInA']
  # For keys whose values differ, show both sides for easy comparison.
  for key in differences['differentValues']:
    print ">>> da[%s] != db[%s]" % (key, key)
    print "da[%s] = %r" % (key, da[key])
    print "db[%s] = %r" % (key, db[key])
  return differences
def dictDiff(da, db):
  """Shallow (top-level) comparison of two dictionaries.

  Returns None when the dictionaries test equal; otherwise a dict with:
    'inAButNotInB'    - set of keys present only in ``da``
    'inBButNotInA'    - set of keys present only in ``db``
    'differentValues' - list of shared keys whose values differ
  A value comparison that yields a non-bool (e.g. a numpy array) is reduced
  with ``.all()``, so arrays are handled at the top level only.
  """
  onlyInA = set(da) - set(db)
  onlyInB = set(db) - set(da)
  changedKeys = []
  for key in (set(da) - onlyInA):
    cmpResult = da[key] == db[key]
    if isinstance(cmpResult, bool):
      valuesEqual = cmpResult
    else:
      # Non-bool comparison result: collapse element-wise equality.
      valuesEqual = cmpResult.all()
    if not valuesEqual:
      changedKeys.append(key)
  resultDict = {
    'inAButNotInB': onlyInA,
    'inBButNotInA': onlyInB,
    'differentValues': changedKeys,
  }
  anyDifference = bool(onlyInA) or bool(onlyInB) or bool(changedKeys)
  return resultDict if anyDifference else None
| 5,295 | Python | .py | 136 | 33.566176 | 81 | 0.645841 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,878 | aggregator.py | numenta_nupic-legacy/src/nupic/data/aggregator.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import datetime
import os
from pkg_resources import resource_filename
import time
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaSpecial
from nupic.data.file_record_stream import FileRecordStream
"""The aggregator aggregates PF datasets
It supports aggregation of multiple records based on time.
Common use cases:
- Aggregate records by month
- Aggregate records every 3 months starting April 15th
- Aggregate records in 2.5 seconds intervals
Assumption: aggregated slices fit in memory. All the records that are aggregated
per period are stored in memory until the next slice starts and are only
aggregated then. If this assumption is too strong the script will need to write
slices to a temp storage or use incremental aggregation techniques.
"""
def initFilter(input, filterInfo = None):
  """ Initializes internal filter variables for further processing.
  Returns a tuple (function to call,parameters for the filter call)
  The filterInfo is a dict. Here is an example structure:
    {fieldName: {'min': x,
                 'max': y,
                 'type': 'category', # or 'number'
                 'acceptValues': ['foo', 'bar'],
               }
    }
  This returns the following:
    (filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),
                  ...)
  Where fieldIdx is the index of the field within each record
        fieldFilterFunc returns True if the value is "OK" (within min, max or
           part of acceptValues)
        fieldDict is a dict containing 'type', 'min', max', 'acceptValues'

  NOTE(review): the parameter name 'input' and the locals 'min'/'max' shadow
  Python builtins; kept as-is for interface compatibility.
  """
  if filterInfo is None:
    return None
  # Build an array of index/func to call on record[index]
  filterList = []
  for i, fieldName in enumerate(input.getFieldNames()):
    fieldFilter = filterInfo.get(fieldName, None)
    if fieldFilter == None:
      continue
    # 'var' carries the filter parameters; the lambdas below read the bounds
    # from the dict they are *called* with (built in _filterRecord), not from
    # a closure over 'var'.
    var = dict()
    var['acceptValues'] = None
    min = fieldFilter.get('min', None)
    max = fieldFilter.get('max', None)
    var['min'] = min
    var['max'] = max
    if fieldFilter['type'] == 'category':
      var['acceptValues'] = fieldFilter['acceptValues']
      fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                      x['value'] in x['acceptValues'])
    elif fieldFilter['type'] == 'number':
      if min != None and max != None:
        fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                        x['value'] >= x['min'] and x['value'] <= x['max'])
      elif min != None:
        fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                        x['value'] >= x['min'])
      else:
        # NOTE(review): if a 'number' filter supplies neither min nor max,
        # this branch compares against max=None at call time -- presumably
        # every number filter carries at least one bound; verify callers.
        fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
                        x['value'] <= x['max'])
    filterList.append((i, fp, var))
  return (_filterRecord, filterList)
def _filterRecord(filterList, record):
""" Takes a record and returns true if record meets filter criteria,
false otherwise
"""
for (fieldIdx, fp, params) in filterList:
x = dict()
x['value'] = record[fieldIdx]
x['acceptValues'] = params['acceptValues']
x['min'] = params['min']
x['max'] = params['max']
if not fp(x):
return False
# None of the field filters triggered, accept the record as a good one
return True
def _aggr_first(inList):
  """ Returns first non-None element in the list, or None if all are None
  """
  # First value that is not the missing-data sentinel, else None.
  return next((item for item in inList
               if item != SENTINEL_VALUE_FOR_MISSING_DATA), None)
def _aggr_last(inList):
  """ Returns last non-None element in the list, or None if all are None
  """
  # Scan from the tail for the last non-missing value.
  return next((item for item in reversed(inList)
               if item != SENTINEL_VALUE_FOR_MISSING_DATA), None)
def _aggr_sum(inList):
  """ Returns sum of the elements in the list. Missing items are replaced with
  the mean value
  """
  fillValue = _aggr_mean(inList)
  if fillValue is None:
    # Every entry was missing; the sum is undefined.
    return None
  total = 0
  for item in inList:
    total += item if item != SENTINEL_VALUE_FOR_MISSING_DATA else fillValue
  return total
def _aggr_mean(inList):
  """ Returns mean of non-None elements of the list

  NOTE: uses the plain '/' operator, so under Python 2 an all-integer input
  yields an integer (floored) mean, matching the original behavior.
  """
  total = 0
  count = 0
  for item in inList:
    if item != SENTINEL_VALUE_FOR_MISSING_DATA:
      total += item
      count += 1
  return total / count if count else None
def _aggr_mode(inList):
  """ Returns most common value seen in the non-None elements of the list

  Tie-breaking between equally common values is unspecified (depends on the
  dict iteration order of valueCounts).
  """
  valueCounts = dict()
  nonNone = 0
  for elem in inList:
    if elem == SENTINEL_VALUE_FOR_MISSING_DATA:
      continue
    nonNone += 1
    if elem in valueCounts:
      valueCounts[elem] += 1
    else:
      valueCounts[elem] = 1
  # Get the most common one
  if nonNone == 0:
    return None
  # Sort by counts
  # NOTE: 'cmp=' sort and list-returning dict.items() are Python 2 only.
  sortedCounts = valueCounts.items()
  sortedCounts.sort(cmp=lambda x,y: x[1] - y[1], reverse=True)
  return sortedCounts[0][0]
def _aggr_weighted_mean(inList, params):
""" Weighted mean uses params (must be the same size as inList) and
makes weighed mean of inList"""
assert(len(inList) == len(params))
# If all weights are 0, then the value is not defined, return None (missing)
weightsSum = sum(params)
if weightsSum == 0:
return None
weightedMean = 0
for i, elem in enumerate(inList):
weightedMean += elem * params[i]
return weightedMean / weightsSum
class Aggregator(object):
  """
  This class provides context and methods for aggregating records. The caller
  should construct an instance of Aggregator and then call the next() method
  repeatedly to get each aggregated record.

  This is an example aggregationInfo dict:
    {
      'hours': 1,
      'minutes': 15,
      'fields': [
        ('timestamp', 'first'),
        ('gym', 'first'),
        ('consumption', 'sum')
      ],
    }
  """

  def __init__(self, aggregationInfo, inputFields, timeFieldName=None,
               sequenceIdFieldName=None, resetFieldName=None, filterInfo=None):
    """ Construct an aggregator instance

    Params:

    - aggregationInfo: a dictionary that contains the following entries
      - fields: a list of pairs. Each pair is a field name and an
        aggregation function (e.g. sum). The function will be used to aggregate
        multiple values during the aggregation period.

      - aggregation period: 0 or more of unit=value fields; allowed units are:
        [years months] | [weeks days hours minutes seconds milliseconds
        microseconds]
        NOTE: years and months are mutually-exclusive with the other units. See
        getEndTime() and _aggregate() for more details.
        Example1: years=1, months=6,
        Example2: hours=1, minutes=30,
        If none of the period fields are specified or if all that are specified
        have values of 0, then aggregation will be suppressed, and the given
        inputFile parameter value will be returned.

    - inputFields: The fields from the data source. This is a sequence of
      `nupic.data.fieldmeta.FieldMetaInfo` instances.

    - timeFieldName: name of the field to use as the time field. If None,
      then the time field will be queried from the reader.

    - sequenceIdFieldName: name of the field to use as the sequenceId. If None,
      then the time field will be queried from the reader.

    - resetFieldName: name of the field to use as the reset field. If None,
      then the time field will be queried from the reader.

    - filterInfo: a structure with rules for filtering records out

    If the input file contains a time field, sequence id field or reset field
    that were not specified in aggregationInfo fields, those fields will be
    added automatically with the following rules:

    1. The order will be R, S, T, rest of the fields

    2. The aggregation function for these will be to pick the first:
       lambda x: x[0]
    """
    # -----------------------------------------------------------------------
    # Save member variables.
    # The same aggregationInfo dict may be used by the caller for generating
    # more datasets (with slight changes), so it is safer to copy it here and
    # all changes made here will not affect the input aggregationInfo
    self._filterInfo = filterInfo
    self._nullAggregation = False
    self._inputFields = inputFields

    # See if this is a null aggregation: no aggregationInfo at all, or every
    # period unit is 0 (in which case records pass through unmodified)
    self._nullAggregation = False
    if aggregationInfo is None:
      self._nullAggregation = True
    else:
      # defaultdict gives 0 for any period unit the caller did not specify
      aggDef = defaultdict(lambda: 0, aggregationInfo)
      if (aggDef['years'] == aggDef['months'] == aggDef['weeks'] ==
          aggDef['days'] == aggDef['hours'] == aggDef['minutes'] ==
          aggDef['seconds'] == aggDef['milliseconds'] ==
          aggDef['microseconds'] == 0):
        self._nullAggregation = True

    # Prepare the field filtering info. The filter allows us to ignore records
    # based on specified min or max values for each field.
    self._filter = initFilter(self._inputFields, self._filterInfo)

    # ----------------------------------------------------------------------
    # Fill in defaults
    self._fields = None
    self._resetFieldIdx = None
    self._timeFieldIdx = None
    self._sequenceIdFieldIdx = None
    self._aggTimeDelta = datetime.timedelta()
    self._aggYears = 0
    self._aggMonths = 0

    # Init state variables used within next()
    self._aggrInputBookmark = None
    self._startTime = None
    self._endTime = None
    self._sequenceId = None
    self._firstSequenceStartTime = None
    self._inIdx = -1
    self._slice = defaultdict(list)

    # ========================================================================
    # Get aggregation params
    # self._fields will be a list of tuples: (fieldIdx, funcPtr, funcParam)
    if not self._nullAggregation:

      # ---------------------------------------------------------------------
      # Verify that all aggregation field names exist in the input
      fieldNames = [f[0] for f in aggregationInfo['fields']]
      readerFieldNames = [f[0] for f in self._inputFields]
      for name in fieldNames:
        if not name in readerFieldNames:
          raise Exception('No such input field: %s' % (name))

      # ---------------------------------------------------------------------
      # Get the indices of the special fields, if given to our constructor
      if timeFieldName is not None:
        self._timeFieldIdx = readerFieldNames.index(timeFieldName)
      if resetFieldName is not None:
        self._resetFieldIdx = readerFieldNames.index(resetFieldName)
      if sequenceIdFieldName is not None:
        self._sequenceIdFieldIdx = readerFieldNames.index(sequenceIdFieldName)

      # ---------------------------------------------------------------------
      # Re-order the fields to match the order in the reader and add in any
      # fields from the reader that were not already in the aggregationInfo
      # fields list.
      self._fields = []
      fieldIdx = -1
      # NOTE(review): the loop variable 'type' shadows the builtin; left
      # unchanged to keep the code byte-identical.
      for (name, type, special) in self._inputFields:
        fieldIdx += 1

        # See if it exists in the aggregationInfo
        found = False
        for field in aggregationInfo['fields']:
          if field[0] == name:
            aggFunctionName = field[1]
            found = True
            break
        if not found:
          # Reader fields absent from aggregationInfo keep their first value
          aggFunctionName = 'first'

        # Convert to a function pointer and optional params
        (funcPtr, params) = self._getFuncPtrAndParams(aggFunctionName)

        # Add it
        self._fields.append((fieldIdx, funcPtr, params))

        # Is it a special field that we are still looking for?
        if special == FieldMetaSpecial.reset and self._resetFieldIdx is None:
          self._resetFieldIdx = fieldIdx
        if special == FieldMetaSpecial.timestamp and self._timeFieldIdx is None:
          self._timeFieldIdx = fieldIdx
        if (special == FieldMetaSpecial.sequence and
            self._sequenceIdFieldIdx is None):
          self._sequenceIdFieldIdx = fieldIdx

      assert self._timeFieldIdx is not None, "No time field was found"

      # Create an instance of _AggregationPeriod with the aggregation period
      self._aggTimeDelta = datetime.timedelta(days=aggDef['days'],
                                              hours=aggDef['hours'],
                                              minutes=aggDef['minutes'],
                                              seconds=aggDef['seconds'],
                                              milliseconds=aggDef['milliseconds'],
                                              microseconds=aggDef['microseconds'],
                                              weeks=aggDef['weeks'])
      self._aggYears = aggDef['years']
      self._aggMonths = aggDef['months']
      # years/months cannot be combined with the timedelta-based units
      if self._aggTimeDelta:
        assert self._aggYears == 0
        assert self._aggMonths == 0

  def _getEndTime(self, t):
    """Add the aggregation period to the input time t and return a datetime object

    Years and months are handled as a special case due to leap years
    and months with different number of dates. They can't be converted
    to a strict timedelta because a period of 3 months will have different
    durations actually. The solution is to just add the years and months
    fields directly to the current time.

    Other periods are converted to timedelta and just added to current time.
    """
    assert isinstance(t, datetime.datetime)
    if self._aggTimeDelta:
      return t + self._aggTimeDelta
    else:
      # Integer arithmetic carries whole years out of the month total
      # (Python 2 '/' on ints floors, which is exactly what the carry needs)
      year = t.year + self._aggYears + (t.month - 1 + self._aggMonths) / 12
      month = (t.month - 1 + self._aggMonths) % 12 + 1
      return t.replace(year=year, month=month)

  def _getFuncPtrAndParams(self, funcName):
    """ Given the name of an aggregation function, returns the function pointer
    and param.

    Parameters:
    ------------------------------------------------------------------------
    funcName:  a string (name of function) or funcPtr
    retval:   (funcPtr, param)
    """
    params = None
    if isinstance(funcName, basestring):
      if funcName == 'sum':
        fp = _aggr_sum
      elif funcName == 'first':
        fp = _aggr_first
      elif funcName == 'last':
        fp = _aggr_last
      elif funcName == 'mean':
        fp = _aggr_mean
      elif funcName == 'max':
        fp = max
      elif funcName == 'min':
        fp = min
      elif funcName == 'mode':
        fp = _aggr_mode
      elif funcName.startswith('wmean:'):
        # 'wmean:<fieldName>' -> param is the index of the weights field
        fp = _aggr_weighted_mean
        paramsName = funcName[6:]
        params = [f[0] for f in self._inputFields].index(paramsName)
    else:
      # Caller passed a function object directly; use it as-is
      fp = funcName

    return (fp, params)

  def _createAggregateRecord(self):
    """ Generate the aggregated output record

    Parameters:
    ------------------------------------------------------------------------
    retval: outputRecord
    """
    record = []

    for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields):
      if aggFP is None: # this field is not supposed to be aggregated.
        continue

      values = self._slice[i]
      refIndex = None  # NOTE(review): unused; appears to be leftover code
      if paramIdx is not None:
        # Weighted aggregation: pass the weights field's slice as well
        record.append(aggFP(values, self._slice[paramIdx]))
      else:
        record.append(aggFP(values))

    return record

  def isNullAggregation(self):
    """ Return True if no aggregation will be performed, either because the
    aggregationInfo was None or all aggregation params within it were 0.
    """
    return self._nullAggregation

  def next(self, record, curInputBookmark):
    """ Return the next aggregated record, if any

    Parameters:
    ------------------------------------------------------------------------
    record:           The input record (values only) from the input source, or
                      None if the input has reached EOF (this will cause this
                      method to force completion of and return any partially
                      aggregated time period)
    curInputBookmark: The bookmark to the next input record
    retval:
      (outputRecord, inputBookmark)

      outputRecord: the aggregated record
      inputBookmark: a bookmark to the last position from the input that
                     contributed to this aggregated record.

      If we don't have any aggregated records yet, returns (None, None)

    The caller should generally do a loop like this:
      while True:
        inRecord = reader.getNextRecord()
        bookmark = reader.getBookmark()

        (aggRecord, aggBookmark) = aggregator.next(inRecord, bookmark)

        # reached EOF?
        if inRecord is None and aggRecord is None:
          break

        if aggRecord is not None:
          processRecord(aggRecord, aggBookmark)

    This method makes use of the self._slice member variable to build up
    the values we need to aggregate. This is a dict of lists. The keys are
    the field indices and the elements of each list are the values for that
    field. For example:

      self._slice = { 0: [42, 53], 1: [4.0, 5.1] }
    """

    # This will hold the aggregated record we return
    outRecord = None

    # This will hold the bookmark of the last input used within the
    # aggregated record we return.
    retInputBookmark = None

    if record is not None:

      # Increment input count
      self._inIdx += 1

      #print self._inIdx, record

      # Apply the filter, ignore the record if any field is unacceptable
      if self._filter != None and not self._filter[0](self._filter[1], record):
        return (None, None)

      # If no aggregation info just return as-is
      if self._nullAggregation:
        return (record, curInputBookmark)

      # ----------------------------------------------------------------------
      # Do aggregation
      #
      # Remember the very first record time stamp - it will be used as
      # the timestamp for all first records in all sequences to align
      # times for the aggregation/join of sequences.
      #
      # For a set of aggregated records, it will use the beginning of the time
      # window as a timestamp for the set
      #
      t = record[self._timeFieldIdx]

      if self._firstSequenceStartTime == None:
        self._firstSequenceStartTime = t

      # Create initial startTime and endTime if needed
      if self._startTime is None:
        self._startTime = t
      if self._endTime is None:
        self._endTime = self._getEndTime(t)
        assert self._endTime > t

      #print 'Processing line:', i, t, endTime
      #from dbgp.client import brk; brk(port=9011)

      # ----------------------------------------------------------------------
      # Does this record have a reset signal or sequence Id associated with it?
      # If so, see if we've reached a sequence boundary
      if self._resetFieldIdx is not None:
        resetSignal = record[self._resetFieldIdx]
      else:
        resetSignal = None

      if self._sequenceIdFieldIdx is not None:
        currSequenceId = record[self._sequenceIdFieldIdx]
      else:
        currSequenceId = None

      # A new sequence starts on an explicit reset (except before any input),
      # on a sequence-id change, or on the very first record
      newSequence = (resetSignal == 1 and self._inIdx > 0) \
                    or self._sequenceId != currSequenceId \
                    or self._inIdx == 0

      if newSequence:
        self._sequenceId = currSequenceId

      # --------------------------------------------------------------------
      # We end the aggregation chunk if we go past the end time
      # -OR- we get an out of order record (t < startTime)
      sliceEnded = (t >= self._endTime or t < self._startTime)

      # -------------------------------------------------------------------
      # Time to generate a new output record?
      if (newSequence or sliceEnded) and len(self._slice) > 0:
        # Create aggregated record
        # print 'Creating aggregate record...'

        # Make first record timestamp as the beginning of the time period,
        # in case the first record wasn't falling on the beginning of the period
        for j, f in enumerate(self._fields):
          index = f[0]
          if index == self._timeFieldIdx:
            self._slice[j][0] = self._startTime
            break

        # Generate the aggregated record
        outRecord = self._createAggregateRecord()
        retInputBookmark = self._aggrInputBookmark

        # Reset the slice
        self._slice = defaultdict(list)

      # --------------------------------------------------------------------
      # Add current record to slice (Note keeping slices in memory). Each
      # field in the slice is a list of field values from all the sliced
      # records
      for j, f in enumerate(self._fields):
        index = f[0]
        # append the parsed field value to the proper aggregated slice field.
        self._slice[j].append(record[index])
      self._aggrInputBookmark = curInputBookmark

      # --------------------------------------------------------------------
      # If we've encountered a new sequence, start aggregation over again
      if newSequence:
        # TODO: May use self._firstSequenceStartTime as a start for the new
        # sequence (to align all sequences)
        self._startTime = t
        self._endTime = self._getEndTime(t)

      # --------------------------------------------------------------------
      # If a slice just ended, re-compute the start and end time for the
      # next aggregated record
      if sliceEnded:
        # Did we receive an out of order record? If so, go back and iterate
        # till we get to the next end time boundary.
        if t < self._startTime:
          self._endTime = self._firstSequenceStartTime
        while t >= self._endTime:
          self._startTime = self._endTime
          self._endTime = self._getEndTime(self._endTime)

      # If we have a record to return, do it now
      if outRecord is not None:
        return (outRecord, retInputBookmark)

    # ---------------------------------------------------------------------
    # Input reached EOF
    # Aggregate one last time in the end if necessary
    elif self._slice:

      # Make first record timestamp as the beginning of the time period,
      # in case the first record wasn't falling on the beginning of the period
      for j, f in enumerate(self._fields):
        index = f[0]
        if index == self._timeFieldIdx:
          self._slice[j][0] = self._startTime
          break

      outRecord = self._createAggregateRecord()
      retInputBookmark = self._aggrInputBookmark

      self._slice = defaultdict(list)

    # Return aggregated record
    return (outRecord, retInputBookmark)
def generateDataset(aggregationInfo, inputFilename, outputFilename=None):
"""Generate a dataset of aggregated values
Parameters:
----------------------------------------------------------------------------
aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] |
[weeks days hours minutes seconds milliseconds microseconds]
NOTE: years and months are mutually-exclusive with the other units.
See getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
inputFilename: filename of the input dataset within examples/prediction/data
outputFilename: name for the output file. If not given, a name will be
generated based on the input filename and the aggregation params
retval: Name of the generated output file. This will be the same as the input
file name if no aggregation needed to be performed
If the input file contained a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for all will be to pick the first: lambda x: x[0]
Returns: the path of the aggregated data file if aggregation was performed
(in the same directory as the given input file); if aggregation did not
need to be performed, then the given inputFile argument value is returned.
"""
# Create the input stream
inputFullPath = resource_filename("nupic.datafiles", inputFilename)
inputObj = FileRecordStream(inputFullPath)
# Instantiate the aggregator
aggregator = Aggregator(aggregationInfo=aggregationInfo,
inputFields=inputObj.getFields())
# Is it a null aggregation? If so, just return the input file unmodified
if aggregator.isNullAggregation():
return inputFullPath
# ------------------------------------------------------------------------
# If we were not given an output filename, create one based on the
# aggregation settings
if outputFilename is None:
outputFilename = 'agg_%s' % \
os.path.splitext(os.path.basename(inputFullPath))[0]
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if aggregationInfo.get(k, 0) > 0:
outputFilename += '_%s_%d' % (k, aggregationInfo[k])
outputFilename += '.csv'
outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)
# ------------------------------------------------------------------------
# If some other process already started creating this file, simply
# wait for it to finish and return without doing anything
lockFilePath = outputFilename + '.please_wait'
if os.path.isfile(outputFilename) or \
os.path.isfile(lockFilePath):
while os.path.isfile(lockFilePath):
print 'Waiting for %s to be fully written by another process' % \
lockFilePath
time.sleep(1)
return outputFilename
# Create the lock file
lockFD = open(lockFilePath, 'w')
# -------------------------------------------------------------------------
# Create the output stream
outputObj = FileRecordStream(streamID=outputFilename, write=True,
fields=inputObj.getFields())
# -------------------------------------------------------------------------
# Write all aggregated records to the output
while True:
inRecord = inputObj.getNextRecord()
(aggRecord, aggBookmark) = aggregator.next(inRecord, None)
if aggRecord is None and inRecord is None:
break
if aggRecord is not None:
outputObj.appendRecord(aggRecord)
return outputFilename
def getFilename(aggregationInfo, inputFile):
  """Return the path of the file that would hold the aggregated dataset.

  The name combines the input file's base name with every non-zero
  aggregation period unit. When the aggregation info contains no non-zero
  period (i.e. no aggregation is needed), the absolute path of the input
  file itself is returned.
  """
  # Find the actual file, with an absolute path
  inputFile = resource_filename("nupic.datafiles", inputFile)

  # Missing period units default to 0
  info = defaultdict(lambda: 0, aggregationInfo)

  baseName = os.path.splitext(os.path.basename(inputFile))[0]
  nameParts = ['agg_%s' % baseName]

  periodUnits = ('years months weeks days '
                 'hours minutes seconds milliseconds microseconds').split()
  for unit in periodUnits:
    if info[unit] > 0:
      nameParts.append('%s_%d' % (unit, info[unit]))

  # No non-zero unit found -> no aggregation -> hand back the input path
  if len(nameParts) == 1:
    return inputFile

  outputFile = '_'.join(nameParts) + '.csv'
  return os.path.join(os.path.dirname(inputFile), outputFile)
| 28,894 | Python | .py | 649 | 37.597843 | 82 | 0.63239 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,879 | json_helpers.py | numenta_nupic-legacy/src/nupic/data/json_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This script is a wrapper for JSON primitives, such as validation.
# Using routines of this module permits us to replace the underlying
# implementation with a better one without disrupting client code.
#
# In particular, at the time of this writing, there weren't really great
# json validation packages available for python. We initially settled
# on validictory, but it has a number of shortcomings, such as:
# * format error diagnostic message isn't always helpful for diagnosis
# * doesn't support references
# * doesn't support application of defaults
# * doesn't support dependencies
#
# TODO: offer a combined json parsing/validation function that applies
# defaults from the schema
# TODO: duplicate of 'validate', 'ValidationError', 'loadJSONValueFromFile'
# in swarming.hypersearch.utils -- will want to remove that later
import json
import math
import os
import validictory
class ValidationError(validictory.ValidationError):
  """Raised by validate() when a value fails json schema validation.

  Subclasses validictory's error so that client code does not need to
  import validictory directly to catch validation failures.
  """
  pass
class NaNInvalidator(validictory.SchemaValidator):
  """SchemaValidator subclass that refuses to accept NaN as a valid number.

  Usage:
        validate(value, schemaDict, validator_cls=NaNInvalidator)
  """
  def validate_type_number(self, val):
    # Reject NaN outright; otherwise defer to the stock number check
    if math.isnan(val):
      return False
    return super(NaNInvalidator, self).validate_type_number(val)
def validate(value, **kwds):
  """ Validate a python value against json schema:
  validate(value, schemaPath)
  validate(value, schemaDict)

  value:          python object to validate against the schema

  The json schema may be specified either as a path of the file containing
  the json schema or as a python dictionary using one of the
  following keywords as arguments:
    schemaPath:     Path of file containing the json schema object.
    schemaDict:     Python dictionary containing the json schema object

  Returns: nothing

  Raises:
          ValidationError when value fails json validation
  """
  assert len(kwds.keys()) >= 1
  assert 'schemaPath' in kwds or 'schemaDict' in kwds

  schemaDict = None
  if 'schemaPath' in kwds:
    # Load the schema from disk; remaining kwds pass through to validictory
    schemaDict = loadJsonValueFromFile(kwds.pop('schemaPath'))
  elif 'schemaDict' in kwds:
    schemaDict = kwds.pop('schemaDict')

  try:
    validictory.validate(value, schemaDict, **kwds)
  except validictory.ValidationError as e:
    # Re-raise under our own exception type for client convenience
    raise ValidationError(e)
def loadJsonValueFromFile(inputFilePath):
  """Read the json document stored at inputFilePath and return the
  corresponding python object.

  inputFilePath:
                  Path of the json file;

  Returns:
                  python value that represents the loaded json value
  """
  with open(inputFilePath) as fileObj:
    return json.load(fileObj)
def test():
"""
"""
import sys
schemaDict = {
"description":"JSON schema for json_helpers.py test code",
"type":"object",
"additionalProperties":False,
"properties":{
"myBool":{
"description":"Some boolean property",
"required":True,
"type":"boolean"
}
}
}
d = {
'myBool': False
}
print "Validating schemaDict method in positive test..."
validate(d, schemaDict=schemaDict)
print "ok\n"
print "Validating schemaDict method in negative test..."
try:
validate({}, schemaDict=schemaDict)
except ValidationError:
print "ok\n"
else:
print "FAILED\n"
sys.exit(1)
schemaPath = os.path.join(os.path.dirname(__file__), "testSchema.json")
print "Validating schemaPath method in positive test using %s..." % \
(os.path.abspath(schemaPath),)
validate(d, schemaPath=schemaPath)
print "ok\n"
print "Validating schemaPath method in negative test using %s..." % \
(os.path.abspath(schemaPath),)
try:
validate({}, schemaPath=schemaPath)
except ValidationError:
print "ok\n"
else:
print "FAILED\n"
sys.exit(1)
return
# Manual smoke test: run this module directly to exercise validate().
if __name__ == "__main__":
  test()
| 4,915 | Python | .py | 134 | 33 | 80 | 0.712326 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,880 | utils.py | numenta_nupic-legacy/src/nupic/data/utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Collection of utilities to process input data
"""
import datetime
import string
# Workaround for this error:
# "ImportError: Failed to import _strptime because the import lockis held by
# another thread"
DATETIME_FORMATS = ('%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S:%f',
'%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d',
'%m/%d/%Y %H:%M', '%m/%d/%y %H:%M',
'%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S')
"""
These are the supported timestamp formats to parse. The first is the format used
by NuPIC when serializing datetimes.
"""
def parseTimestamp(s):
  """
  Parses a textual datetime format and return a Python datetime object.

  The supported format is: ``yyyy-mm-dd h:m:s.ms``

  The time component is optional.

  - hours are 00..23 (no AM/PM)
  - minutes are 00..59
  - seconds are 00..59
  - micro-seconds are 000000..999999

  :param s: (string) input time text
  :return: (datetime.datetime)
  :raises ValueError: when none of the DATETIME_FORMATS match
  """
  text = s.strip()
  # Try each known format until one parses
  for fmt in DATETIME_FORMATS:
    try:
      return datetime.datetime.strptime(text, fmt)
    except ValueError:
      continue
  raise ValueError('The provided timestamp %s is malformed. The supported '
                   'formats are: [%s]' % (text, ', '.join(DATETIME_FORMATS)))
def serializeTimestamp(t):
  """
  Turns a datetime object into a string.

  :param t: (datetime.datetime)
  :return: (string) in default format (see
           :const:`~nupic.data.utils.DATETIME_FORMATS` [0])
  """
  # The first entry of DATETIME_FORMATS is the canonical NuPIC format
  canonicalFormat = DATETIME_FORMATS[0]
  return t.strftime(canonicalFormat)
def serializeTimestampNoMS(t):
  """
  Turns a datetime object into a string, ignoring milliseconds.

  :param t: (datetime.datetime)
  :return: (string) in default format (see
           :const:`~nupic.data.utils.DATETIME_FORMATS` [2])
  """
  # Index 2 is the whole-second variant of the canonical format
  secondsOnlyFormat = DATETIME_FORMATS[2]
  return t.strftime(secondsOnlyFormat)
def parseBool(s):
  """
  Convert a textual boolean flag to a bool.

  Accepts (case-insensitively) "true"/"t"/"1" and "false"/"f"/"0".

  :param s: (string)
  :return: (bool)
  :raises Exception: for any other input
  """
  normalized = s.lower()
  if normalized in ("true", "t", "1"):
    return True
  if normalized in ("false", "f", "0"):
    return False
  raise Exception("Unable to convert string '%s' to a boolean value" % s)
def floatOrNone(f):
  """
  Convert input to a float, treating the literal string 'None' as missing.

  :param f: (object) thing to convert to a float
  :return: (float or ``None``)
  """
  return None if f == 'None' else float(f)
def intOrNone(i):
  """
  Convert input to an int, treating 'None'/'NULL' (after stripping
  whitespace) as missing.

  :param i: (string) thing to convert to an int
  :return: (int or ``None``)
  """
  stripped = i.strip()
  if stripped in ('None', 'NULL'):
    return None
  return int(i)
def escape(s):
  """
  Escape commas, tabs, newlines and dashes in a string

  Commas are encoded as tabs.

  :param s: (string) to escape
  :returns: (string) escaped string ('' when s is None)
  """
  if s is None:
    return ''

  assert isinstance(s, basestring), \
    "expected %s but got %s; value=%s" % (basestring, type(s), s)
  # Order matters: backslashes first, then the sequences that introduce one,
  # finally the comma->tab encoding.
  result = s.replace('\\', '\\\\')
  result = result.replace('\n', '\\n')
  result = result.replace('\t', '\\t')
  return result.replace(',', '\t')
def unescape(s):
  """
  Unescapes a string that may contain commas, tabs, newlines and dashes

  Commas are decoded from tabs.

  :param s: (string) to unescape
  :returns: (string) unescaped string
  """
  assert isinstance(s, basestring)
  # Inverse of escape(): tab->comma first, then the backslash sequences
  result = s.replace('\t', ',')
  result = result.replace('\\,', ',')
  result = result.replace('\\n', '\n')
  return result.replace('\\\\', '\\')
def parseSdr(s):
  """
  Parses a string containing only 0's and 1's and return a Python list object.

  :param s: (string) string to parse
  :returns: (list) SDR out
  :raises ValueError: if the string contains any character other than 0/1
  """
  assert isinstance(s, basestring)
  sdr = [int(c) for c in s if c in ("0", "1")]
  if len(sdr) != len(s):
    # BUG FIX: the original never interpolated %s (missing the '% s'), so the
    # message printed the literal placeholder instead of the offending string.
    raise ValueError("The provided string %s is malformed. The string should "
                     "have only 0's and 1's." % s)

  return sdr
def serializeSdr(sdr):
  """
  Serialize a Python list object containing only 0's and 1's to a string.

  :param sdr: (list) binary
  :returns: (string) SDR out
  """
  digits = [str(bit) for bit in sdr]
  return "".join(digits)
def parseStringList(s):
  """
  Parse a string of space-separated numbers, returning a Python list.

  :param s: (string) to parse
  :returns: (list) of ints
  """
  assert isinstance(s, basestring)
  return [int(token) for token in s.split()]
def stripList(listObj):
  """
  Convert a list of numbers to a string of space-separated values.

  :param listObj: (list) to convert
  :returns: (string) of space-separated values
  """
  pieces = [str(item) for item in listObj]
  return " ".join(pieces)
| 5,538 | Python | .py | 166 | 29.584337 | 80 | 0.639428 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,881 | __init__.py | numenta_nupic-legacy/src/nupic/data/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""A data source for the prediction framework has a getNext() method.
FileSource is a base class for file-based sources. There are two
sub-classes:
TextFileSource - can read delimited text files (e.g. CSV files)
StandardSource - can read a binary file of marshaled Python objects
"""
# Placeholder stored in a record field when the data source had no value
# for it; aggregation and encoding code compare against this sentinel.
SENTINEL_VALUE_FOR_MISSING_DATA = None
from function_source import FunctionSource
| 1,347 | Python | .py | 28 | 46.928571 | 72 | 0.703422 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,882 | sorter.py | numenta_nupic-legacy/src/nupic/data/sorter.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
from operator import itemgetter
import psutil
from nupic.support import title
from nupic.data.file_record_stream import FileRecordStream
"""The sorter sorts PF datasets in the standard File format
- It supports sorting by multiple fields
- It allows sorting of datasets that don't fit in memory
- It allows selecting a subset of the original fields
The sorter uses merge sort.
"""
def sort(filename, key, outputFile, fields=None, watermark=1024 * 1024 * 100):
  """Sort a potentially big file

  filename - the input file (standard File format)
  key - a list of field names to sort by
  outputFile - the name of the output file
  fields - a list of fields that should be included (all fields if None)
  watermark - when available memory goes below the watermark create a new chunk

  sort() works by reading records from the file into memory
  and calling _sortChunk() on each chunk. In the process it gets
  rid of unneeded fields if any. Once all the chunks have been sorted and
  written to chunk files it calls _mergeFiles() to merge all the chunks into a
  single sorted file.

  Note, that sort() gets a key that contains field names, which it converts
  into field indices for _sortChunk() because _sortChunk() doesn't need to know
  the field name.

  sort() figures out by itself how many chunk files to use by reading records
  from the file until the low watermark value of available memory is hit and
  then it sorts the current records, generates a chunk file, clears the sorted
  records and starts on a new chunk.
  """
  if fields is not None:
    # Every sort key must name one of the requested fields
    assert set(key).issubset(set([f[0] for f in fields]))

  with FileRecordStream(filename) as f:
    # Find the indices of the requested fields
    if fields:
      fieldNames = [ff[0] for ff in fields]
      indices = [f.getFieldNames().index(name) for name in fieldNames]
      assert len(indices) == len(fields)
    else:
      # BUG FIX: was `fileds = f.getFields()` (typo), which left `fields` as
      # None and broke the chunk/output streams created with fields=None.
      fields = f.getFields()
      fieldNames = f.getFieldNames()
      indices = None

    # Turn key field names into key indices
    key = [fieldNames.index(name) for name in key]

    chunk = 0
    records = []
    for i, r in enumerate(f):
      # Select requested fields only (renamed the inner loop variable, which
      # previously shadowed the record counter `i`)
      if indices:
        r = [r[index] for index in indices]

      # Store processed record
      records.append(r)

      # Check memory; if below the watermark create a new chunk, reset and
      # keep going.  NOTE(review): psutil.avail_phymem() is a long-deprecated
      # psutil API -- kept for compatibility with the pinned psutil version.
      if psutil.avail_phymem() < watermark:
        _sortChunk(records, key, chunk, fields)
        records = []
        chunk += 1

    # Sort and write the remainder
    if len(records) > 0:
      _sortChunk(records, key, chunk, fields)
      chunk += 1

  # Merge all the chunk files into the final sorted output
  _mergeFiles(key, chunk, outputFile, fields)
def _sortChunk(records, key, chunkIndex, fields):
  """Sort one in-memory chunk of records and optionally spill it to disk.

  records    - list of records read from the original dataset (already
               reduced to the fields requested by the user)
  key        - list of field indices to sort the records by
  chunkIndex - index of the current chunk (None skips the file write)
  fields     - field definitions for the chunk file

  When chunkIndex is not None, the sorted records are written to a standard
  File named "chunk_<chunk index>.csv" (chunk_0.csv, chunk_1.csv, ...).
  Returns the sorted records.
  """
  title(additional='(key=%s, chunkIndex=%d)' % (str(key), chunkIndex))

  assert len(records) > 0

  # In-place sort by the requested key indices
  records.sort(key=itemgetter(*key))

  if chunkIndex is not None:
    chunkFilename = 'chunk_%d.csv' % chunkIndex
    with FileRecordStream(chunkFilename, write=True, fields=fields) as out:
      for rec in records:
        out.appendRecord(rec)
    # A freshly written chunk file must never be empty
    assert os.path.getsize(chunkFilename) > 0

  return records
def _mergeFiles(key, chunkCount, outputFile, fields):
  """Merge sorted chunk files into a sorted output file

  key        - list of field indices the chunk files are sorted by
  chunkCount - the number of available chunk files
  outputFile - the name of the sorted output file
  fields     - field definitions for the output file
  """
  title()

  # Open all chunk files exactly once.
  # BUG FIX: the original opened every chunk file twice -- once before and
  # once inside the `with` block -- leaking the first set of file handles.
  files = [FileRecordStream('chunk_%d.csv' % i) for i in range(chunkCount)]
  # Keep a stable copy for cleanup; `files` is filtered as chunks exhaust.
  allFiles = list(files)

  # Open output file
  with FileRecordStream(outputFile, write=True, fields=fields) as o:
    # Prime one record from every chunk
    records = [f.getNextRecord() for f in files]

    # This loop will run until all files are exhausted
    while not all(r is None for r in records):
      # Cleanup None values (files that were exhausted)
      indices = [i for i, r in enumerate(records) if r is not None]
      records = [records[i] for i in indices]
      files = [files[i] for i in indices]

      # Find the smallest current record and write it to the output
      r = min(records, key=itemgetter(*key))
      o.appendRecord(r)

      # Read a new record from the file that produced the current record
      index = records.index(r)
      records[index] = files[index].getNextRecord()

  # Cleanup chunk files.
  # BUG FIX: the original enumerated the *filtered* `files` list, so it only
  # closed/removed the chunks that happened to survive filtering, and with
  # re-enumerated (wrong) indices.  Close every stream and remove every chunk.
  for f in allFiles:
    f.close()
  for i in range(chunkCount):
    os.remove('chunk_%d.csv' % i)
def writeTestFile(testFile, fields, big):
if big:
print 'Creating big test file (763MB)...'
payload = 'x' * 10 ** 8
else:
print 'Creating a small big test file...'
payload = 'x' * 3
with FileRecordStream(testFile, write=True, fields=fields) as o:
print '.'; o.appendRecord([1,3,6, payload])
print '.'; o.appendRecord([2,3,6, payload])
print '.'; o.appendRecord([1,4,6, payload])
print '.'; o.appendRecord([2,4,6, payload])
print '.'; o.appendRecord([1,3,5, payload])
print '.'; o.appendRecord([2,3,5, payload])
print '.'; o.appendRecord([1,4,5, payload])
print '.'; o.appendRecord([2,4,5, payload])
def test(long):
import shutil
from tempfile import gettempdir
print 'Running sorter self-test...'
# Switch to a temp dir in order to create files freely
workDir = os.path.join(gettempdir(), 'sorter_test')
if os.path.exists(workDir):
shutil.rmtree(workDir)
os.makedirs(workDir)
os.chdir(workDir)
print 'cwd:', os.getcwd()
# The fields definition used by all tests
fields = [
('f1', 'int', ''),
('f2', 'int', ''),
('f3', 'int', ''),
('payload', 'string', '')
]
# Create a test file
testFile = '1.csv'
if not os.path.isfile(testFile):
writeTestFile(testFile, fields, big=long)
# Set watermark here to 300MB bellow current available memory. That ensures
# multiple chunk files in the big testcase
mem = psutil.avail_phymem()
watermark = mem - 300 * 1024 * 1024
print 'Test sorting by f1 and f2, watermak:', watermark
results = []
sort(testFile,
key=['f1', 'f2'],
fields=fields,
outputFile='f1_f2.csv',
watermark=watermark)
with FileRecordStream('f1_f2.csv') as f:
for r in f:
results.append(r[:3])
assert results == [
[1, 3, 6],
[1, 3, 5],
[1, 4, 6],
[1, 4, 5],
[2, 3, 6],
[2, 3, 5],
[2, 4, 6],
[2, 4, 5],
]
mem = psutil.avail_phymem()
watermark = mem - 300 * 1024 * 1024
print 'Test sorting by f2 and f1, watermark:', watermark
results = []
sort(testFile,
key=['f2', 'f1'],
fields=fields,
outputFile='f2_f1.csv',
watermark=watermark)
with FileRecordStream('f2_f1.csv') as f:
for r in f:
results.append(r[:3])
assert results == [
[1, 3, 6],
[1, 3, 5],
[2, 3, 6],
[2, 3, 5],
[1, 4, 6],
[1, 4, 5],
[2, 4, 6],
[2, 4, 5],
]
mem = psutil.avail_phymem()
watermark = mem - 300 * 1024 * 1024
print 'Test sorting by f3 and f2, watermark:', watermark
results = []
sort(testFile,
key=['f3', 'f2'],
fields=fields,
outputFile='f3_f2.csv',
watermark=watermark)
with FileRecordStream('f3_f2.csv') as f:
for r in f:
results.append(r[:3])
assert results == [
[1, 3, 5],
[2, 3, 5],
[1, 4, 5],
[2, 4, 5],
[1, 3, 6],
[2, 3, 6],
[1, 4, 6],
[2, 4, 6],
]
# Cleanup the work dir
os.chdir('..')
shutil.rmtree(workDir)
print 'done'
if __name__=='__main__':
print 'Starting tests...'
test('--long' in sys.argv)
print 'All tests pass'
| 9,163 | Python | .py | 258 | 31.155039 | 80 | 0.662297 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,883 | function_source.py | numenta_nupic-legacy/src/nupic/data/function_source.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import types
import marshal
class FunctionSource(object):
  """A source of programmatically-generated data.

  Thin shell around a user-supplied function and user-supplied state. The
  function's code object is serialized by hand (via ``marshal``), which is
  what allows a network containing this source to be saved.
  """

  # How sequence information is conveyed by the generated records
  SEQUENCEINFO_RESET_ONLY = 0
  SEQUENCEINFO_SEQUENCEID_ONLY = 1
  SEQUENCEINFO_BOTH = 2
  SEQUENCEINFO_NONE = 3

  def __init__(self,
               func,
               state=None,
               resetFieldName=None,
               sequenceIdFieldName=None):
    self.func = func
    self.state = state
    self.resetFieldName = resetFieldName
    self.sequenceIdFieldName = sequenceIdFieldName
    self._cacheSequenceInfoType()

  def _cacheSequenceInfoType(self):
    """Determine once -- rather than on every record -- whether reset,
    sequenceId, both, or neither are present in the data.
    Taken from filesource.py."""
    hasReset = self.resetFieldName is not None
    hasSequenceId = self.sequenceIdFieldName is not None

    if hasReset and hasSequenceId:
      self._sequenceInfoType = self.SEQUENCEINFO_BOTH
    elif hasReset:
      self._sequenceInfoType = self.SEQUENCEINFO_RESET_ONLY
      self._prevSequenceId = 0
    elif hasSequenceId:
      self._sequenceInfoType = self.SEQUENCEINFO_SEQUENCEID_ONLY
      self._prevSequenceId = None
    else:
      self._sequenceInfoType = self.SEQUENCEINFO_NONE

  def getNextRecordDict(self):
    """Call the wrapped function and decorate its record with the
    _reset / _sequenceId / _category bookkeeping fields."""
    result = self.func(self.state)

    infoType = self._sequenceInfoType
    if infoType == self.SEQUENCEINFO_NONE:
      reset = 0
      sequenceId = 0
    elif infoType == self.SEQUENCEINFO_RESET_ONLY:
      reset = result[self.resetFieldName]
      if reset:
        # Each explicit reset starts a new implicit sequence
        self._prevSequenceId += 1
      sequenceId = self._prevSequenceId
    elif infoType == self.SEQUENCEINFO_SEQUENCEID_ONLY:
      sequenceId = result[self.sequenceIdFieldName]
      # A change of sequence id implies a reset
      reset = sequenceId != self._prevSequenceId
      self._prevSequenceId = sequenceId
    elif infoType == self.SEQUENCEINFO_BOTH:
      reset = result[self.resetFieldName]
      sequenceId = result[self.sequenceIdFieldName]
    else:
      raise RuntimeError(
          "Internal error -- sequence info type not set in RecordSensor")

    # convert to int. Note hash(int) = same value
    result["_reset"] = int(bool(reset))
    result["_sequenceId"] = hash(sequenceId)
    result["_category"] = [None]
    return result

  def __getstate__(self):
    """Pickle support: marshal the function's code object by hand, since
    plain functions aren't directly picklable."""
    serializedFunc = dict()
    serializedFunc['code'] = marshal.dumps(self.func.func_code)
    serializedFunc['name'] = self.func.func_name
    serializedFunc['doc'] = self.func.func_doc

    state = dict(
      state = self.state,
      resetFieldName = self.resetFieldName,
      sequenceIdFieldName = self.sequenceIdFieldName,
      sequenceInfoType = self._sequenceInfoType,
      prevSequenceId = getattr(self, "_prevSequenceId", None)
    )
    state['func'] = serializedFunc
    return state

  def __setstate__(self, state):
    # Rebuild the function from its marshaled code object.
    # NOTE: the reconstructed function closes over this module's globals().
    funcinfo = state['func']
    self.func = types.FunctionType(marshal.loads(funcinfo['code']), globals())
    self.func.func_name = funcinfo['name']
    self.func.func_doc = funcinfo['doc']

    self.state = state['state']
    self.resetFieldName = state['resetFieldName']
    self.sequenceIdFieldName = state['sequenceIdFieldName']
    self._sequenceInfoType = state['sequenceInfoType']
    self._prevSequenceId = state['prevSequenceId']
| 4,498 | Python | .py | 110 | 35.854545 | 78 | 0.696519 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,884 | stats.py | numenta_nupic-legacy/src/nupic/data/stats.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import pickle
from pkg_resources import resource_filename
from nupic.regions.record_sensor import RecordSensor
from nupic.data.file_record_stream import FileRecordStream
"""
Generate column statistics for a StandardSource.
Each entry in statsInfo corresponds to one column, and contains a list
of statistics that should be computed for that column. Known statistics
are:
for floating point or integer values:
number -- min, max, mean
for string or integer values:
category -- list of all unique values and count
The model for a stats object is that you call the constructor with
the first value, and then add values with add().
(The alternative would be no args for the constructor, and
all values would be added with add()).
There are two reasons for this:
- no initialization check required every time we add a value
- getStats() can always return a valid result
"""
class NumberStatsCollector(object):
  """Accumulates min/max/sum/count statistics for a numeric column.

  The first add() seeds the statistics (and validates the value type);
  every later add() folds one more value in. getStats() is always valid
  once at least one value has been added.
  """

  # Only plain ints and floats are accepted (checked on the first value)
  validTypes = [int, float]

  def __init__(self):
    self.min = 0
    self.max = 0
    self.sum = 0
    self.n = 0
    self.initialized = False

  def _addFirst(self, value):
    """Seed all statistics from the first value."""
    if type(value) not in self.validTypes:
      raise RuntimeError("NumberStatsCollector -- value '%s' is not a valid type" % value)

    value = float(value)
    self.min = self.max = self.sum = value
    self.n = 1
    self.initialized = True

  def add(self, value):
    """Fold one more value into the running statistics."""
    if not self.initialized:
      self._addFirst(value)
      return

    value = float(value)
    self.min = min(self.min, value)
    self.max = max(self.max, value)
    self.sum += value
    self.n += 1

  def getStats(self):
    """Return a dict with min, max, sum, n and average."""
    return {
        'min': self.min,
        'max': self.max,
        'sum': self.sum,
        'n': self.n,
        'average': self.sum / self.n,
    }
class CategoryStatsCollector(object):
  """Counts occurrences of each unique value in a categorical column."""

  def __init__(self):
    self.categories = {}

  def add(self, value):
    """Increment the occurrence count for this category value."""
    if value in self.categories:
      self.categories[value] += 1
    else:
      self.categories[value] = 1

  def getStats(self):
    """Return a dict mapping each seen value to its occurrence count."""
    return {'categories': self.categories}
def getStatsFilename(filename, statsInfo, filters=None):
  """Derive the cache-file path for the stats of a dataset.

  filename  - absolute path of the source .csv file
  statsInfo - dict whose keys are the field names stats were requested for
  filters   - optional list of filter objects, each providing getShortName()

  Returns a path in the same directory as `filename`, of the form
  "stats_<key>..._[filters_<shortName>..._]<basename with csv->stats>".

  Raises RuntimeError if filename is not absolute or is not a .csv file.
  """
  # Avoid the mutable-default-argument pitfall of the original (filters=[])
  if filters is None:
    filters = []

  if not os.path.isabs(filename):
    raise RuntimeError("Filename %s is not an absolute path" % filename)
  if not filename.endswith(".csv"):
    raise RuntimeError("generateStats only supports csv files: %s" % filename)

  d = os.path.dirname(filename)
  # NOTE: this replaces the first occurrence of "csv" anywhere in the
  # basename, not strictly the extension -- kept as-is for compatibility.
  basename = os.path.basename(filename).replace("csv", "stats")

  parts = ["stats"]
  parts.extend(statsInfo)
  if len(filters) > 0:
    parts.append("filters")
    parts.extend(f.getShortName() for f in filters)
  sstring = "_".join(parts)

  return os.path.join(d, sstring + "_" + basename)
def generateStats(filename, statsInfo, maxSamples = None, filters=[], cache=True):
"""Generate requested statistics for a dataset and cache to a file.
If filename is None, then don't cache to a file"""
# Sanity checking
if not isinstance(statsInfo, dict):
raise RuntimeError("statsInfo must be a dict -- "
"found '%s' instead" % type(statsInfo))
filename = resource_filename("nupic.datafiles", filename)
if cache:
statsFilename = getStatsFilename(filename, statsInfo, filters)
# Use cached stats if found AND if it has the right data
if os.path.exists(statsFilename):
try:
r = pickle.load(open(statsFilename, "rb"))
except:
# Ok to ignore errors -- we will just re-generate the file
print "Warning: unable to load stats for %s -- " \
"will regenerate" % filename
r = dict()
requestedKeys = set([s for s in statsInfo])
availableKeys = set(r.keys())
unavailableKeys = requestedKeys.difference(availableKeys)
if len(unavailableKeys ) == 0:
return r
else:
print "generateStats: re-generating stats file %s because " \
"keys %s are not available" % \
(filename, str(unavailableKeys))
os.remove(filename)
print "Generating statistics for file '%s' with filters '%s'" % (filename, filters)
sensor = RecordSensor()
sensor.dataSource = FileRecordStream(filename)
sensor.preEncodingFilters = filters
# Convert collector description to collector object
stats = []
for field in statsInfo:
# field = key from statsInfo
if statsInfo[field] == "number":
# This wants a field name e.g. consumption and the field type as the value
statsInfo[field] = NumberStatsCollector()
elif statsInfo[field] == "category":
statsInfo[field] = CategoryStatsCollector()
else:
raise RuntimeError("Unknown stats type '%s' for field '%s'" % (statsInfo[field], field))
# Now collect the stats
if maxSamples is None:
maxSamples = 500000
for i in xrange(maxSamples):
try:
record = sensor.getNextRecord()
except StopIteration:
break
for (name, collector) in statsInfo.items():
collector.add(record[name])
del sensor
# Assemble the results and return
r = dict()
for (field, collector) in statsInfo.items():
stats = collector.getStats()
if field not in r:
r[field] = stats
else:
r[field].update(stats)
if cache:
f = open(statsFilename, "wb")
pickle.dump(r, f)
f.close()
# caller may need to know name of cached file
r["_filename"] = statsFilename
return r
| 6,351 | Python | .py | 169 | 32.95858 | 94 | 0.681899 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,885 | field_meta.py | numenta_nupic-legacy/src/nupic/data/field_meta.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module defines the structure of meta-data that describes the field name,
field type, special field attribute, etc. for a field in a dataset.
"""
from collections import namedtuple
FieldMetaInfoBase = namedtuple('FieldMetaInfoBase', ['name', 'type', 'special'])
class FieldMetaInfo(FieldMetaInfoBase):
  """
  Container of meta-data for a single field (column) of a dataset.

  Each instance exposes ``name``, ``type``, and ``special`` attributes.

  Examples:

  1. Access a sub-element from an instance of FieldMetaInfo:

     - ``metainfo.name``
     - ``metainfo.type``
     - ``metainfo.special``

  2. Create a single element of ``FieldMetaInfo`` from a tuple of ``name``,
     ``type``, and ``special``:

     .. code-block:: python

       e = ('pounds', FieldMetaType.float, FieldMetaSpecial.none)
       m = FieldMetaInfo.createFromFileFieldElement(e)

  :param str name: field name
  :param str type: one of the values from FieldMetaType
  :param str special: one of the values from FieldMetaSpecial
  :raises ValueError: if type or special arg values are invalid
  """

  def __init__(self,
               name,
               type,  # pylint: disable=W0622
               special):
    # The namedtuple base already stored the values in __new__;
    # this only enforces the type/special invariants.
    if not FieldMetaType.isValid(type):
      raise ValueError('Unexpected field type %r' % (type,))
    if not FieldMetaSpecial.isValid(special):
      raise ValueError('Unexpected field special attribute %r' % (special,))
    super(FieldMetaInfo, self).__init__(name, type, special)

  @staticmethod
  def createFromFileFieldElement(fieldInfoTuple):
    """
    Create a :class:`.field_meta.FieldMetaInfo` instance from a tuple of
    (``name``, ``type``, ``special``).

    :param fieldInfoTuple: Must contain ``name``, ``type``, and ``special``
    :return: (:class:`~.field_meta.FieldMetaInfo`) instance
    """
    return FieldMetaInfo._make(fieldInfoTuple)

  @classmethod
  def createListFromFileFieldList(cls, fields):
    """
    Create a list of FieldMetaInfo instances from a list of tuples by applying
    :meth:`~.field_meta.FieldMetaInfo.createFromFileFieldElement` to each one.

    *Example:*

    .. code-block:: python

      el = [("pounds", FieldMetaType.float, FieldMetaSpecial.none),
            ("price", FieldMetaType.float, FieldMetaSpecial.none),
            ("id", FieldMetaType.string, FieldMetaSpecial.sequence),
            ("date", FieldMetaType.datetime, FieldMetaSpecial.timestamp),
           ]
      ml = FieldMetaInfo.createListFromFileFieldList(el)

    :param fields: a sequence of (``name``, ``type``, ``special``) tuples
    :return: list of :class:`~.field_meta.FieldMetaInfo` elements
    """
    return [cls.createFromFileFieldElement(fieldTuple) for fieldTuple in fields]
class FieldMetaType(object):
  """
  Enumeration of the valid field data types:

  - ``string``
  - ``datetime``
  - ``int``
  - ``float``
  - ``bool``
  - ``list``
  - ``sdr``
  """
  string = 'string'
  datetime = 'datetime'
  integer = 'int'
  float = 'float'
  boolean = 'bool'
  list = 'list'
  sdr = 'sdr'

  # Every legitimate value, used by isValid()
  _ALL = (string, datetime, integer, float, boolean, list, sdr)

  @classmethod
  def isValid(cls, fieldDataType):
    """Check whether a candidate value is one of the valid field data types.

    :param fieldDataType: (string) candidate field data type
    :returns: True if the candidate value is a legitimate field data type
              value; False if not
    """
    return fieldDataType in cls._ALL
class FieldMetaSpecial(object):
  """
  Enumeration of the valid "special" field attributes:

  - ``R``: reset
  - ``S``: sequence
  - ``T``: timestamp
  - ``C``: category
  - ``L``: learning
  """
  none = ''
  reset = 'R'
  sequence = 'S'
  timestamp = 'T'
  category = 'C'
  learning = 'L'

  # Every legitimate value, used by isValid()
  _ALL = (none, reset, sequence, timestamp, category, learning,)

  @classmethod
  def isValid(cls, attr):
    """Check whether a candidate value is one of the valid special attributes.

    :param attr: (string) candidate value
    :returns: True if the candidate value is a legitimate "special" field
              attribute; False if not
    """
    return attr in cls._ALL
| 5,416 | Python | .py | 136 | 34.860294 | 81 | 0.668131 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,886 | inference_shifter.py | numenta_nupic-legacy/src/nupic/data/inference_shifter.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""TimeShifter class for shifting ModelResults."""
import collections
import copy
from nupic.frameworks.opf.opf_utils import InferenceElement, ModelResult
class InferenceShifter(object):
  """
  Shifts time for :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` objects.
  This is useful for plotting results with the predictions at the same time step
  as the input data.
  """

  def __init__(self):
    # FIFO of past inference dicts; created lazily on the first shift() call
    # because the required depth depends on the max temporal delay of the
    # inferences actually present.
    self._inferenceBuffer = None

  def shift(self, modelResult):
    """Shift the model result and return the new instance.

    Queues up the T(i+1) prediction value and emits a T(i)
    input/prediction pair, if possible. E.g., if the previous T(i-1)
    iteration was learn-only, then we would not have a T(i) prediction in our
    FIFO and would not be able to emit a meaningful input/prediction pair.

    :param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult`
                        instance to shift.
    :return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that
             has been shifted
    """
    # Accumulates the delayed (shifted) inference values to emit this step
    inferencesToWrite = {}

    if self._inferenceBuffer is None:
      # Size the FIFO to hold the deepest temporal delay plus the current step
      maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)
      self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1)

    # Deep-copied so later mutation of the caller's dict can't corrupt history
    self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))

    for inferenceElement, inference in modelResult.inferences.iteritems():
      if isinstance(inference, dict):
        # Multi-step style inference: each key may carry its own delay
        inferencesToWrite[inferenceElement] = {}
        for key, _ in inference.iteritems():
          delay = InferenceElement.getTemporalDelay(inferenceElement, key)
          if len(self._inferenceBuffer) > delay:
            # Buffer is deep enough: emit the inference made `delay` steps ago
            prevInference = self._inferenceBuffer[delay][inferenceElement][key]
            inferencesToWrite[inferenceElement][key] = prevInference
          else:
            # Not enough history yet (e.g. early learn-only iterations)
            inferencesToWrite[inferenceElement][key] = None
      else:
        delay = InferenceElement.getTemporalDelay(inferenceElement)
        if len(self._inferenceBuffer) > delay:
          inferencesToWrite[inferenceElement] = (
              self._inferenceBuffer[delay][inferenceElement])
        else:
          # No history: emit a None placeholder with the same shape as the
          # inference (list/tuple of Nones, or a bare None)
          if type(inference) in (list, tuple):
            inferencesToWrite[inferenceElement] = [None] * len(inference)
          else:
            inferencesToWrite[inferenceElement] = None

    # Everything except `inferences` is passed through unchanged
    shiftedResult = ModelResult(rawInput=modelResult.rawInput,
                                sensorInput=modelResult.sensorInput,
                                inferences=inferencesToWrite,
                                metrics=modelResult.metrics,
                                predictedFieldIdx=modelResult.predictedFieldIdx,
                                predictedFieldName=modelResult.predictedFieldName)
    return shiftedResult
| 3,767 | Python | .py | 75 | 42.8 | 82 | 0.678989 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,887 | data_generator.py | numenta_nupic-legacy/src/nupic/data/generators/data_generator.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import random as rand
from nupic.encoders import adaptive_scalar, sdr_category, date
from nupic.bindings.math import GetNTAReal
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.generators.distributions import *
realDType = GetNTAReal()
class DataGenerator():
"""The DataGenerator provides a framework for generating, encoding, saving
and exporting records. Each column of the output contains records with a
specific set of parameters such as encoderType, n, w, etc. This interface
is intended to be used for testing the spatial pooler, temporal memory and
for generating artificial datasets.
"""
def __init__(self, name='testDataset', seed=42, verbosity=0):
"""Initialize the dataset generator with a random seed and a name"""
self.name=name
self.verbosity=verbosity
self.setSeed(seed)
self.fields=[]
def getDescription(self):
"""Returns a description of the dataset"""
description = {'name':self.name, 'fields':[f.name for f in self.fields], \
'numRecords by field':[f.numRecords for f in self.fields]}
return description
def setSeed(self, seed):
"""Set the random seed and the numpy seed
Parameters:
--------------------------------------------------------------------
seed: random seed
"""
rand.seed(seed)
np.random.seed(seed)
def addField(self, name, fieldParams, encoderParams):
"""Add a single field to the dataset.
Parameters:
-------------------------------------------------------------------
name: The user-specified name of the field
fieldSpec: A list of one or more dictionaries specifying parameters
to be used for dataClass initialization. Each dict must
contain the key 'type' that specifies a distribution for
the values in this field
encoderParams: Parameters for the field encoder
"""
assert fieldParams is not None and'type' in fieldParams
dataClassName = fieldParams.pop('type')
try:
dataClass=eval(dataClassName)(fieldParams)
except TypeError, e:
print ("#### Error in constructing %s class object. Possibly missing "
"some required constructor parameters. Parameters "
"that were provided are: %s" % (dataClass, fieldParams))
raise
encoderParams['dataClass']=dataClass
encoderParams['dataClassName']=dataClassName
fieldIndex = self.defineField(name, encoderParams)
def addMultipleFields(self, fieldsInfo):
"""Add multiple fields to the dataset.
Parameters:
-------------------------------------------------------------------
fieldsInfo: A list of dictionaries, containing a field name, specs for
the data classes and encoder params for the corresponding
field.
"""
assert all(x in field for x in ['name', 'fieldSpec', 'encoderParams'] for field \
in fieldsInfo)
for spec in fieldsInfo:
self.addField(spec.pop('name'), spec.pop('fieldSpec'), spec.pop('encoderParams'))
def defineField(self, name, encoderParams=None):
"""Initialize field using relevant encoder parameters.
Parameters:
-------------------------------------------------------------------
name: Field name
encoderParams: Parameters for the encoder.
Returns the index of the field
"""
self.fields.append(_field(name, encoderParams))
return len(self.fields)-1
def setFlag(self, index, flag):
"""Set flag for field at index. Flags are special characters such as 'S' for
sequence or 'T' for timestamp.
Parameters:
--------------------------------------------------------------------
index: index of field whose flag is being set
flag: special character
"""
assert len(self.fields)>index
self.fields[index].flag=flag
def generateRecord(self, record):
"""Generate a record. Each value is stored in its respective field.
Parameters:
--------------------------------------------------------------------
record: A 1-D array containing as many values as the number of fields
fields: An object of the class field that specifies the characteristics
of each value in the record
Assertion:
--------------------------------------------------------------------
len(record)==len(fields): A value for each field must be specified.
Replace missing values of any type by
SENTINEL_VALUE_FOR_MISSING_DATA
This method supports external classes but not combination of classes.
"""
assert(len(record)==len(self.fields))
if record is not None:
for x in range(len(self.fields)):
self.fields[x].addValue(record[x])
else:
for field in self.fields:
field.addValue(field.dataClass.getNext())
def generateRecords(self, records):
"""Generate multiple records. Refer to definition for generateRecord"""
if self.verbosity>0: print 'Generating', len(records), 'records...'
for record in records:
self.generateRecord(record)
def getRecord(self, n=None):
"""Returns the nth record"""
if n is None:
assert len(self.fields)>0
n = self.fields[0].numRecords-1
assert (all(field.numRecords>n for field in self.fields))
record = [field.values[n] for field in self.fields]
return record
def getAllRecords(self):
"""Returns all the records"""
values=[]
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
for x in range(numRecords):
values.append(self.getRecord(x))
return values
def encodeRecord(self, record, toBeAdded=True):
"""Encode a record as a sparse distributed representation
Parameters:
--------------------------------------------------------------------
record: Record to be encoded
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields
"""
encoding=[self.fields[i].encodeValue(record[i], toBeAdded) for i in \
xrange(len(self.fields))]
return encoding
  def encodeAllRecords(self, records=None, toBeAdded=True):
    """Encodes a list of records.

    Parameters:
    --------------------------------------------------------------------
    records:   One or more records. (i,j)th element of this 2D array
               specifies the value at field j of record i.
               If unspecified, records previously generated and stored are
               used.
    toBeAdded: Whether the encodings corresponding to the record are added to
               the corresponding fields

    Returns a list with one entry per record; each entry is the list of
    per-field encodings produced by encodeRecord.
    """
    if records is None:
      records = self.getAllRecords()
    # Python 2 print statement; verbosity is a numeric level on the owner.
    if self.verbosity>0: print 'Encoding', len(records), 'records.'
    encodings = [self.encodeRecord(record, toBeAdded) for record in records]
    return encodings
def addValueToField(self, i, value=None):
"""Add 'value' to the field i.
Parameters:
--------------------------------------------------------------------
value: value to be added
i: value is added to field i
"""
assert(len(self.fields)>i)
if value is None:
value = self.fields[i].dataClass.getNext()
self.fields[i].addValue(value)
return value
else: self.fields[i].addValue(value)
def addValuesToField(self, i, numValues):
"""Add values to the field i."""
assert(len(self.fields)>i)
values = [self.addValueToField(i) for n in range(numValues)]
return values
def getSDRforValue(self, i, j):
"""Returns the sdr for jth value at column i"""
assert len(self.fields)>i
assert self.fields[i].numRecords>j
encoding = self.fields[i].encodings[j]
return encoding
  def getZeroedOutEncoding(self, n):
    """Returns the nth encoding with the predictedField zeroed out.

    The predicted field's slice is replaced by the encoding of
    SENTINEL_VALUE_FOR_MISSING_DATA (the "value missing" marker); every
    other field contributes its stored nth encoding.
    """
    assert all(field.numRecords>n for field in self.fields)
    encoding = np.concatenate([field.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)\
                    if field.isPredictedField else field.encodings[n] for field in self.fields])
    return encoding
def getTotaln(self):
"""Returns the cumulative n for all the fields in the dataset"""
n = sum([field.n for field in self.fields])
return n
def getTotalw(self):
"""Returns the cumulative w for all the fields in the dataset"""
w = sum([field.w for field in self.fields])
return w
def getEncoding(self, n):
"""Returns the nth encoding"""
assert (all(field.numEncodings>n for field in self.fields))
encoding = np.concatenate([field.encodings[n] for field in self.fields])
return encoding
def getAllEncodings(self):
"""Returns encodings for all the records"""
numEncodings=self.fields[0].numEncodings
assert (all(field.numEncodings==numEncodings for field in self.fields))
encodings = [self.getEncoding(index) for index in range(numEncodings)]
return encodings
def getAllFieldNames(self):
"""Returns all field names"""
names = [field.name for field in self.fields]
return names
def getAllFlags(self):
"""Returns flags for all fields"""
flags = [field.flag for field in self.fields]
return flags
def getAllDataTypes(self):
"""Returns data types for all fields"""
dataTypes = [field.dataType for field in self.fields]
return dataTypes
def getFieldDescriptions(self):
"""Returns descriptions for all fields"""
descriptions = [field.getDescription() for field in self.fields]
return descriptions
  def saveRecords(self, path='myOutput'):
    """Export all the records into a csv file in numenta format.

    Example header format:
    fieldName1    fieldName2    fieldName3
    date          string        float
    T             S

    Parameters:
    --------------------------------------------------------------------
    path:    Relative path of the file to which the records are to be exported
             ('.csv' is appended automatically)
    """
    numRecords = self.fields[0].numRecords
    assert (all(field.numRecords==numRecords for field in self.fields))
    import csv
    # 'wb' is the Python 2 csv convention (binary mode avoids extra \r chars).
    with open(path+'.csv', 'wb') as f:
      writer = csv.writer(f)
      # Three header rows -- names, data types, flags -- then the data rows.
      writer.writerow(self.getAllFieldNames())
      writer.writerow(self.getAllDataTypes())
      writer.writerow(self.getAllFlags())
      writer.writerows(self.getAllRecords())
    if self.verbosity>0:
      print '******', numRecords,'records exported in numenta format to file:',\
                path,'******\n'
def removeAllRecords(self):
"""Deletes all the values in the dataset"""
for field in self.fields:
field.encodings, field.values=[], []
field.numRecords, field.numEncodings= (0, 0)
class _field():
  """A single dataset field: holds the field's encoder configuration and
  accumulates the values and encodings generated for it.
  """
  def __init__(self, name, encoderSpec):
    """Initialize a field with various parameters such as n, w, flag, dataType,
    encoderType, and tag predicted field.

    Parameters:
    --------------------------------------------------------------------
    name:        field name
    encoderSpec: dict of encoder parameters, or None for all defaults.
                 Recognized keys are popped from the dict; remaining keys
                 (e.g. minval/maxval) are consumed by _initializeEncoders.
    """
    self.name=name
    #Default values
    self.n, self.w = (100, 15)
    self.encoderType,self.dataType,self.dataClassName = (None, None, None)
    self.flag=''
    self.isPredictedField=False
    if encoderSpec is not None:
      if 'n' in encoderSpec: self.n = encoderSpec.pop('n')
      if 'w' in encoderSpec: self.w = encoderSpec.pop('w')
      if 'flag' in encoderSpec: self.flag = encoderSpec.pop('flag')
      if 'isPredictedField' in encoderSpec: self.isPredictedField\
                        = encoderSpec.pop('isPredictedField')
      if 'dataClass' in encoderSpec: self.dataClass \
                        = encoderSpec.pop('dataClass')
      if 'dataClassName' in encoderSpec: self.dataClassName \
                        = encoderSpec.pop('dataClassName')
      if 'dataType' in encoderSpec: self.dataType = encoderSpec.pop('dataType')
      if 'encoderType' in encoderSpec: self.encoderType \
                        = encoderSpec.pop('encoderType')
    # ==========================================================================
    # Setting up the encoders
    if self.dataType is None and self.encoderType is None:
      raise RuntimeError('At least one of dataType and encoderType must be specified')
    # (A redundant assert of the same condition used to follow the raise
    # above; the RuntimeError already guarantees it.)
    if self.dataType is None or self.encoderType is None:
      self._setTypes(encoderSpec)
    self._initializeEncoders(encoderSpec)
    self.encodings=[]
    self.values=[]
    self.numRecords=0
    self.numEncodings=0
  def getDescription(self):
    """Return a dict summarizing this field's configuration and state."""
    description = dict(n=self.n, w=self.w, flag=self.flag, isPredictedField=\
          self.isPredictedField, dataClass=self.dataClassName, encoderType= \
          self.encoderType, numRecords=self.numRecords, numEncodings=self.numEncodings)
    return description
  def addValues(self, values):
    """Add values to the field"""
    for v in values:
      self.addValue(v)
  def addValue(self, value):
    """Add value to the field"""
    self.values.append(value)
    self.numRecords+=1
  def encodeValue(self, value, toBeAdded=True):
    """Value is encoded as a sdr using the encoding parameters of the Field.

    When toBeAdded is True the encoding is also appended to self.encodings.
    """
    encodedValue = np.array(self.encoder.encode(value), dtype=realDType)
    if toBeAdded:
      self.encodings.append(encodedValue)
      self.numEncodings+=1
    return encodedValue
  def _setTypes(self, encoderSpec):
    """Fill in whichever of dataType / encoderType was left unspecified,
    inferring one from the other."""
    if self.encoderType is None:
      if self.dataType in ['int','float']:
        self.encoderType='adaptiveScalar'
      elif self.dataType=='string':
        self.encoderType='category'
      elif self.dataType in ['date', 'datetime']:
        self.encoderType='date'
    if self.dataType is None:
      if self.encoderType in ['scalar','adaptiveScalar']:
        self.dataType='float'
      elif self.encoderType in ['category', 'enumeration']:
        self.dataType='string'
      elif self.encoderType in ['date', 'datetime']:
        self.dataType='datetime'
  def _initializeEncoders(self, encoderSpec):
    """Initialize the concrete encoder for this field's encoderType."""
    #Initializing scalar encoder
    if self.encoderType in ['adaptiveScalar', 'scalar']:
      if 'minval' in encoderSpec:
        self.minval = encoderSpec.pop('minval')
      else: self.minval=None
      if 'maxval' in encoderSpec:
        self.maxval = encoderSpec.pop('maxval')
      else: self.maxval = None
      self.encoder=adaptive_scalar.AdaptiveScalarEncoder(name='AdaptiveScalarEncoder', \
            w=self.w, n=self.n, minval=self.minval, maxval=self.maxval, periodic=False, forced=True)
    #Initializing category encoder
    elif self.encoderType=='category':
      self.encoder=sdr_category.SDRCategoryEncoder(name='categoryEncoder', \
            w=self.w, n=self.n)
    #Initializing date encoder
    elif self.encoderType in ['date', 'datetime']:
      self.encoder=date.DateEncoder(name='dateEncoder')
    else:
      # Fixed: the two fragments were previously concatenated without a
      # space, producing "...encoder typeor dataType...".
      raise RuntimeError('Error in constructing class object. Either encoder type '
                         'or dataType must be specified')
| 16,250 | Python | .py | 361 | 38.426593 | 145 | 0.641206 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,888 | distributions.py | numenta_nupic-legacy/src/nupic/data/generators/distributions.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
from math import *
from random import *
import numpy as np
class Distributions():
  """Abstract base class for value distributions."""
  def __init__(self):
    """A distribution is a set of values with certain statistical properties

    Methods/properties that must be implemented by subclasses
    - getNext() -- Returns the next value for the distribution
    - getData(n) -- Returns n values for the distribution
    - getDescription() -- Returns a dict of parameters pertinent to the
                          distribution, if any as well as state variables.
    """
  def getNext(self):
    """ Returns the next value of the disribution using knowledge about the
    current state of the distribution as stored in numValues.
    """
    # NotImplementedError is the idiomatic abstract-method error; it
    # subclasses Exception, so existing callers catching Exception still work.
    raise NotImplementedError("getNext must be implemented by all subclasses")
  def getData(self, n):
    """Returns the next n values for the distribution as a list."""
    records = [self.getNext() for x in range(n)]
    return records
  def getDescription(self):
    """Returns a dict of parameters pertinent to the distribution (if any) as
    well as state variables such as numValues."""
    raise NotImplementedError("getDescription must be implemented by all subclasses")
class SineWave(Distributions):
  """Distribution that emits successive samples of a sine wave with a
  configurable period, amplitude and phase shift."""
  def __init__(self, params={}):
    # Recognized keys are popped off params; unspecified ones fall back to
    # defaults (period=pi, amplitude=1, phaseShift=0).
    self.period = params.pop('period') if 'period' in params else pi
    self.amplitude = params.pop('amplitude') if 'amplitude' in params else 1
    self.phaseShift = params.pop('phaseShift') if 'phaseShift' in params else 0
    self.valueNum = 0
  def getNext(self):
    """Return the next sample and advance the internal sample counter."""
    angle = 2*pi*(self.period)*self.valueNum*(pi/180) - self.phaseShift
    nextVal = self.amplitude*np.sin(angle)
    self.valueNum += 1
    return nextVal
  def getData(self, numOfValues):
    """Return the next numOfValues samples as a list."""
    return Distributions.getData(self, numOfValues)
  def getDescription(self):
    """Return the wave parameters plus the number of samples emitted."""
    return dict(name='SineWave', period=self.period,
                amplitude=self.amplitude, phaseShift=self.phaseShift,
                numOfValues=self.valueNum)
class RandomCategories(Distributions):
  """Distribution that emits random strings of 3-15 distinct lowercase
  letters."""
  def __init__(self, params={}):
    self.valueNum = 0
    self.alphabet = 'abcdefghijklmnopqrstuvwxyz'
  def getNext(self):
    """Return a random category string and bump the sample counter."""
    self.valueNum += 1
    # sample() draws without replacement, so all characters are distinct.
    return ''.join(sample(self.alphabet, randint(3, 15)))
  def getData(self, numOfValues):
    """Return the next numOfValues category strings as a list."""
    return Distributions.getData(self, numOfValues)
  def getDescription(self):
    """Return the distribution name plus the number of samples emitted."""
    return dict(name='Random Categories', numOfValues=self.valueNum)
class GaussianDistribution(Distributions):
  """Generates a gaussian distribution"""
  def __init__(self, params={}):
    self.valueNum=0
    assert 'numOfValues' in params
    self.numOfValues = params.pop('numOfValues')
    if 'mean' in params: self.mean = params.pop('mean')
    else: self.mean = 0
    if 'std' in params: self.std=params.pop('std')
    else: self.std = 0.6
    # All samples are drawn up front; getNext just walks through them.
    self.records = np.random.normal(self.mean, self.std, self.numOfValues)
  def getNext(self):
    # Fails once the pre-drawn pool of numOfValues samples is exhausted.
    assert (self.numOfValues>self.valueNum)
    nextValue = self.records[self.valueNum]
    self.valueNum+=1
    return nextValue
  def getData(self):
    # Note: narrower signature than the base class -- always returns all
    # numOfValues samples.
    return Distributions.getData(self, self.numOfValues)
  def getDescription(self):
    description = dict(name='GaussianDistribution', mean=self.mean,
                       standardDeviation=self.std, numOfValues=self.valueNum)
    # NOTE(review): this chunk appears truncated -- the original file
    # presumably has `return description` here; confirm against the source.
| 4,449 | Python | .py | 102 | 39.166667 | 87 | 0.710121 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,889 | __init__.py | numenta_nupic-legacy/src/nupic/data/generators/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module containing data generation tools."""
| 1,027 | Python | .py | 21 | 47.857143 | 73 | 0.672637 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,890 | anomalyzer.py | numenta_nupic-legacy/src/nupic/data/generators/anomalyzer.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tool for adding anomalies to data sets."""
import random
import sys
from nupic.data.file_record_stream import FileRecordStream
USAGE = """
Usage:
python anomalyzer.py input output action extraArgs
Actions:
add
extraArgs: column start stop value
Adds value to each element in range [start, stop].
scale
extraArgs: column start stop multiple
Multiplies each element in range [start, stop] by multiple.
copy
extraArgs: start stop [insertLocation] [tsCol]
Copies the values in range [start, stop] and inserts them at
insertLocation, or following the copied section if no insertLocation
is specified. Updates timestamps if tsCol is given.
sample
extraArgs: n [start] [stop] [tsCol]
Samples rows from the specified range and writes them to the new file.
If tsCol is specified, the timestamps of this column are updated.
sample2
Same as sample except the rows before and after the specified range are
preserved.
"""
class Actions(object):
  """Enum class for actions that can be performed."""
  # The string values double as the command-line action names parsed by main().
  ADD = 'add'
  SCALE = 'scale'
  COPY = 'copy'
  SAMPLE = 'sample'
  SAMPLE2 = 'sample2'
  # Tuple of all valid actions, used for CLI validation in main().
  ACTIONS = (ADD, SCALE, COPY, SAMPLE, SAMPLE2)
def add(reader, writer, column, start, stop, value):
  """Adds a value to one column over an inclusive range of rows.

  Args:
    reader: A FileRecordStream object (or any iterable of rows) with input
        data.
    writer: A FileRecordStream object to write output data to.
    column: The column of data to modify.
    start: The first row in the range to modify.
    stop: The last row in the range to modify (inclusive).
    value: The value to add; each affected cell is first converted to
        value's type.
  """
  for rowIndex, row in enumerate(reader):
    if start <= rowIndex <= stop:
      row[column] = type(value)(row[column]) + value
    writer.appendRecord(row)
def scale(reader, writer, column, start, stop, multiple):
  """Multiplies one column by a constant over an inclusive range of rows.

  Args:
    reader: A FileRecordStream object (or any iterable of rows) with input
        data.
    writer: A FileRecordStream object to write output data to.
    column: The column of data to modify.
    start: The first row in the range to modify.
    stop: The last row in the range to modify (inclusive).
    multiple: The value to scale/multiply by; each affected cell is first
        converted to multiple's type.
  """
  for rowIndex, row in enumerate(reader):
    if start <= rowIndex <= stop:
      row[column] = type(multiple)(row[column]) * multiple
    writer.appendRecord(row)
def copy(reader, writer, start, stop, insertLocation=None, tsCol=None):
  """Copies a range of values to a new location in the data set.

  Args:
    reader: A FileRecordStream object with input data.
    writer: A FileRecordStream object to write output data to.
    start: The first row in the range to copy.
    stop: The last row in the range to copy (inclusive).
    insertLocation: The location to insert the copied range. If not specified,
        the range is inserted immediately following itself.
    tsCol: Index of the timestamp column to rewrite; defaults to the reader's
        timestamp field.
  """
  assert stop >= start
  startRows = []
  copyRows = []
  ts = None
  inc = None
  if tsCol is None:
    tsCol = reader.getTimestampFieldIdx()
  for i, row in enumerate(reader):
    # Get the first timestamp and the increment.
    # NOTE(review): inc is derived from the first two rows only -- assumes a
    # constant sampling interval; with fewer than two rows inc stays None and
    # the `ts += inc` below raises. Confirm inputs always have >= 2 rows.
    if ts is None:
      ts = row[tsCol]
    elif inc is None:
      inc = row[tsCol] - ts
    # Keep a list of all rows and a list of rows to copy.
    if i >= start and i <= stop:
      copyRows.append(row)
    startRows.append(row)
  # Insert the copied rows.
  if insertLocation is None:
    insertLocation = stop + 1
  startRows[insertLocation:insertLocation] = copyRows
  # Update the timestamps: every output row gets a fresh, evenly spaced
  # timestamp starting from the original first timestamp.
  for row in startRows:
    row[tsCol] = ts
    writer.appendRecord(row)
    ts += inc
def sample(reader, writer, n, start=None, stop=None, tsCol=None,
           writeSampleOnly=True):
  """Samples n rows.

  Args:
    reader: A FileRecordStream object with input data.
    writer: A FileRecordStream object to write output data to.
    n: The number of elements to sample.
    start: The first row in the range to sample from.
    stop: The last row in the range to sample from (inclusive).
    tsCol: If specified, the timestamp column to update.
    writeSampleOnly: If False, the rows before start are written before the
        sample and the rows after stop are written after the sample.

  Uses the module-level `random` generator; seed it externally for
  reproducible sampling.
  """
  rows = list(reader)
  if tsCol is not None:
    # Timestamp base and increment from the first two rows (assumes >= 2 rows
    # and a constant sampling interval).
    ts = rows[0][tsCol]
    inc = rows[1][tsCol] - ts
  if start is None:
    start = 0
  if stop is None:
    stop = len(rows) - 1
  initialN = stop - start + 1
  # Select random rows in the sample range to delete until the desired number
  # of rows are left.
  numDeletes = initialN - n
  for i in xrange(numDeletes):
    delIndex = random.randint(start, stop - i)
    del rows[delIndex]
  # Remove outside rows if specified.
  if writeSampleOnly:
    rows = rows[start:start + n]
  # Rewrite columns if tsCol is given.
  if tsCol is not None:
    ts = rows[0][tsCol]
  # Write resulting rows.
  for row in rows:
    if tsCol is not None:
      row[tsCol] = ts
      ts += inc
    writer.appendRecord(row)
def main(args):
  """CLI dispatcher: parses `input output action extraArgs` (see USAGE)."""
  inputPath, outputPath, action = args[:3]
  with FileRecordStream(inputPath) as reader:
    with FileRecordStream(outputPath, write=True,
                          fields=reader.fields) as writer:
      assert action in Actions.ACTIONS, USAGE
      if action == Actions.ADD:
        assert len(args) == 7, USAGE
        start = int(args[4])
        stop = int(args[5])
        column = int(args[3])
        # SECURITY NOTE: eval() turns the field's declared type name (read
        # from the input file's header) into a callable. Only run this tool
        # on trusted input files.
        valueType = eval(reader.fields[column][1])
        value = valueType(args[6])
        add(reader, writer, column, start, stop, value)
      elif action == Actions.SCALE:
        assert len(args) == 7, USAGE
        start = int(args[4])
        stop = int(args[5])
        column = int(args[3])
        # SECURITY NOTE: see the eval() note above.
        valueType = eval(reader.fields[column][1])
        multiple = valueType(args[6])
        scale(reader, writer, column, start, stop, multiple)
      elif action == Actions.COPY:
        assert 5 <= len(args) <= 8, USAGE
        start = int(args[3])
        stop = int(args[4])
        if len(args) > 5:
          insertLocation = int(args[5])
        else:
          insertLocation = None
        if len(args) == 7:
          tsCol = int(args[6])
        else:
          tsCol = None
        copy(reader, writer, start, stop, insertLocation, tsCol)
      elif action == Actions.SAMPLE or action == Actions.SAMPLE2:
        assert 4 <= len(args) <= 7, USAGE
        n = int(args[3])
        start = None
        if len(args) > 4:
          start = int(args[4])
        stop = None
        if len(args) > 5:
          stop = int(args[5])
        tsCol = None
        if len(args) > 6:
          tsCol = int(args[6])
        # SAMPLE writes only the sampled range; SAMPLE2 preserves the rows
        # before and after it.
        writeSampleOnly = action == Actions.SAMPLE
        sample(reader, writer, n, start, stop, tsCol, writeSampleOnly)
if __name__ == "__main__":
  # Require at least one CLI argument; otherwise print usage and exit
  # (Python 2 print statement).
  if len(sys.argv) <= 1:
    print USAGE
    sys.exit(1)
  main(sys.argv[1:])
| 7,753 | Python | .py | 213 | 31.380282 | 78 | 0.665291 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,891 | sequence_machine.py | numenta_nupic-legacy/src/nupic/data/generators/sequence_machine.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities for generating and manipulating sequences, for use in
experimentation and tests.
"""
import numpy as np
from nupic.bindings.math import Random
class SequenceMachine(object):
  """
  Base sequence machine class.

  Converts lists of pattern numbers (via a PatternMachine) into sequences
  of SDR patterns, with optional spatial noise and pretty-printing helpers.
  """

  def __init__(self,
               patternMachine,
               seed=42):
    """
    @param patternMachine (PatternMachine) Pattern machine instance
    @param seed           (int)            Random number generator seed
    """
    # Save member variables
    self.patternMachine = patternMachine

    # Initialize member variables
    self._random = Random(seed)


  def generateFromNumbers(self, numbers):
    """
    Generate a sequence from a list of numbers.

    Note: Any `None` in the list of numbers is considered a reset.

    @param numbers (list) List of numbers

    @return (list) Generated sequence
    """
    sequence = []

    for number in numbers:
      # Identity check for the reset marker (`== None` would also invoke
      # custom __eq__ implementations).
      if number is None:
        sequence.append(number)
      else:
        pattern = self.patternMachine.get(number)
        sequence.append(pattern)

    return sequence


  def addSpatialNoise(self, sequence, amount):
    """
    Add spatial noise to each pattern in the sequence.

    @param sequence (list)  Sequence
    @param amount   (float) Amount of spatial noise

    @return (list) Sequence with spatial noise
    """
    newSequence = []

    for pattern in sequence:
      if pattern is not None:
        pattern = self.patternMachine.addNoise(pattern, amount)
      newSequence.append(pattern)

    return newSequence


  def prettyPrintSequence(self, sequence, verbosity=1):
    """
    Pretty print a sequence.

    @param sequence  (list) Sequence
    @param verbosity (int)  Verbosity level

    @return (string) Pretty-printed text
    """
    text = ""

    # range() behaves identically to the former xrange() here and keeps the
    # method usable under Python 3.
    for i in range(len(sequence)):
      pattern = sequence[i]

      if pattern is None:
        text += "<reset>"
        if i < len(sequence) - 1:
          text += "\n"
      else:
        text += self.patternMachine.prettyPrintPattern(pattern,
                                                       verbosity=verbosity)

    return text


  def generateNumbers(self, numSequences, sequenceLength, sharedRange=None):
    """
    @param numSequences   (int)   Number of sequences to return,
                                  separated by None
    @param sequenceLength (int)   Length of each sequence
    @param sharedRange    (tuple) (start index, end index) indicating range of
                                  shared subsequence in each sequence
                                  (None if no shared subsequences)

    @return (list) Numbers representing sequences
    """
    numbers = []

    if sharedRange:
      sharedStart, sharedEnd = sharedRange
      sharedLength = sharedEnd - sharedStart
      # Shared numbers are allocated above the per-sequence number range so
      # they never collide with the shuffled per-sequence numbers.
      sharedNumbers = range(numSequences * sequenceLength,
                            numSequences * sequenceLength + sharedLength)

    for i in range(numSequences):
      start = i * sequenceLength
      newNumbers = np.array(range(start, start + sequenceLength), np.uint32)
      self._random.shuffle(newNumbers)
      newNumbers = list(newNumbers)

      if sharedRange is not None:
        newNumbers[sharedStart:sharedEnd] = sharedNumbers

      numbers += newNumbers
      numbers.append(None)

    return numbers
| 4,218 | Python | .py | 112 | 31.151786 | 78 | 0.65275 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,892 | pattern_machine.py | numenta_nupic-legacy/src/nupic/data/generators/pattern_machine.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities for generating and manipulating patterns, for use in
experimentation and tests.
"""
import numpy as np
from nupic.bindings.math import Random
class PatternMachine(object):
  """
  Base pattern machine class.

  Generates a fixed pool of `num` random SDR patterns over `n` bits and
  provides lookup, reverse lookup, noise and pretty-printing utilities.
  """

  def __init__(self,
               n,
               w,
               num=100,
               seed=42):
    """
    @param n   (int)      Number of available bits in pattern
    @param w   (int/list) Number of on bits in pattern
                          If list, each pattern will have a `w` randomly
                          selected from the list.
    @param num (int)      Number of available patterns
    """
    # Save member variables
    self._n = n
    self._w = w
    self._num = num

    # Initialize member variables
    self._random = Random(seed)
    self._patterns = dict()

    self._generate()


  def get(self, number):
    """
    Return a pattern for a number.

    @param number (int) Number of pattern

    @return (set) Indices of on bits
    """
    if number not in self._patterns:
      raise IndexError("Invalid number")

    return self._patterns[number]


  def addNoise(self, bits, amount):
    """
    Add noise to pattern.

    @param bits   (set)   Indices of on bits
    @param amount (float) Probability of switching an on bit with a random bit

    @return (set) Indices of on bits in noisy pattern
    """
    newBits = set()

    for bit in bits:
      if self._random.getReal64() < amount:
        newBits.add(self._random.getUInt32(self._n))
      else:
        newBits.add(bit)

    return newBits


  def numbersForBit(self, bit):
    """
    Return the set of pattern numbers that match a bit.

    @param bit (int) Index of bit

    @return (set) Indices of numbers
    """
    if bit >= self._n:
      raise IndexError("Invalid bit")

    numbers = set()

    # items() (rather than Python 2's iteritems()) produces the same pairs
    # and keeps the class importable under Python 3.
    for index, pattern in self._patterns.items():
      if bit in pattern:
        numbers.add(index)

    return numbers


  def numberMapForBits(self, bits):
    """
    Return a map from number to matching on bits,
    for all numbers that match a set of bits.

    @param bits (set) Indices of bits

    @return (dict) Mapping from number => on bits.
    """
    numberMap = dict()

    for bit in bits:
      numbers = self.numbersForBit(bit)

      for number in numbers:
        if number not in numberMap:
          numberMap[number] = set()

        numberMap[number].add(bit)

    return numberMap


  def prettyPrintPattern(self, bits, verbosity=1):
    """
    Pretty print a pattern.

    @param bits      (set) Indices of on bits
    @param verbosity (int) Verbosity level

    @return (string) Pretty-printed text
    """
    numberMap = self.numberMapForBits(bits)
    text = ""

    numberList = []
    # Sort by how many of the queried bits each pattern matched, descending.
    # (A plain lambda replaces the original tuple-unpacking lambda, which is
    # Python 2-only syntax and a SyntaxError under Python 3.)
    numberItems = sorted(numberMap.items(),
                         key=lambda item: len(item[1]),
                         reverse=True)

    for number, bits in numberItems:

      if verbosity > 2:
        strBits = [str(n) for n in bits]
        numberText = "{0} (bits: {1})".format(number, ",".join(strBits))
      elif verbosity > 1:
        numberText = "{0} ({1} bits)".format(number, len(bits))
      else:
        numberText = str(number)

      numberList.append(numberText)

    text += "[{0}]".format(", ".join(numberList))

    return text


  def _generate(self):
    """
    Generates set of random patterns.
    """
    candidates = np.array(range(self._n), np.uint32)
    for i in range(self._num):
      self._random.shuffle(candidates)
      pattern = candidates[0:self._getW()]
      self._patterns[i] = set(pattern)


  def _getW(self):
    """
    Gets a value of `w` for use in generating a pattern.
    """
    w = self._w

    if isinstance(w, list):
      return w[self._random.getUInt32(len(w))]
    else:
      return w
class ConsecutivePatternMachine(PatternMachine):
  """
  Pattern machine class that generates patterns with non-overlapping,
  consecutive on bits.
  """

  def _generate(self):
    """
    Generates the set of consecutive patterns: pattern i contains the bits
    [i*w, (i+1)*w).
    """
    n = self._n
    w = self._w
    assert type(w) is int, "List for w not supported"

    # Floor division: identical to `/` for ints under Python 2, and keeps
    # the loop bound an int under Python 3 (where `/` yields a float that
    # range() rejects).
    for i in range(n // w):
      pattern = set(range(i * w, (i+1) * w))
      self._patterns[i] = pattern
| 5,229 | Python | .py | 158 | 27.341772 | 78 | 0.626595 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,893 | unimportable_node.py | numenta_nupic-legacy/src/nupic/regions/unimportable_node.py | """This file need only exist for testing purposes, and is not a valid region.
"""
| 82 | Python | .py | 2 | 40 | 77 | 0.7375 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,894 | anomaly_likelihood_region.py | numenta_nupic-legacy/src/nupic/regions/anomaly_likelihood_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Implementation of region for computing anomaly likelihoods."""
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.serializable import Serializable
class AnomalyLikelihoodRegion(PyRegion, Serializable):
"""Region for computing the anomaly likelihoods."""
  @classmethod
  def getSpec(cls):
    """Return the region spec: the inputs, outputs and parameters this
    region exposes to the network API."""
    return {
        "description": ("Region that computes anomaly likelihoods for \
                       temporal memory."),
        "singleNodeOnly": True,
        "inputs": {
            "rawAnomalyScore": {
                "description": "The anomaly score whose \
                               likelihood is to be computed",
                "dataType": "Real32",
                "count": 1,
                "required": True,
                "isDefaultInput": False
            },
            "metricValue": {
                "description": "The input metric value",
                "dataType": "Real32",
                "count": 1,
                "required": True,
                "isDefaultInput": False
            },
        },
        "outputs": {
            "anomalyLikelihood": {
                "description": "The resultant anomaly likelihood",
                "dataType": "Real32",
                "count": 1,
                "isDefaultOutput": True,
            },
        },
        # Parameter defaults below match the __init__ defaults.
        "parameters": {
            "learningPeriod": {
                "description": "The number of iterations required for the\
                               algorithm to learn the basic patterns in the dataset\
                               and for the anomaly score to 'settle down'.",
                "dataType": "UInt32",
                "count": 1,
                "constraints": "",
                "defaultValue": 288,
                "accessMode": "ReadWrite"
            },
            "estimationSamples": {
                "description": "The number of reasonable anomaly scores\
                               required for the initial estimate of the\
                               Gaussian.",
                "dataType": "UInt32",
                "count": 1,
                "constraints": "",
                "defaultValue": 100,
                "accessMode": "ReadWrite"
            },
            "historicWindowSize": {
                "description": "Size of sliding window of historical data\
                               points to maintain for periodic reestimation\
                               of the Gaussian.",
                "dataType": "UInt32",
                "count": 1,
                "constraints": "",
                "defaultValue": 8640,
                "accessMode": "ReadWrite"
            },
            "reestimationPeriod": {
                "description": "How often we re-estimate the Gaussian\
                               distribution.",
                "dataType": "UInt32",
                "count": 1,
                "constraints": "",
                "defaultValue": 100,
                "accessMode": "ReadWrite"
            },
        },
        "commands": {
        },
    }
def __init__(self,
learningPeriod = 288,
estimationSamples = 100,
historicWindowSize = 8640,
reestimationPeriod = 100):
self.anomalyLikelihood = AnomalyLikelihood(
learningPeriod = learningPeriod,
estimationSamples = estimationSamples,
historicWindowSize = historicWindowSize,
reestimationPeriod = reestimationPeriod)
def __eq__(self, other):
return self.anomalyLikelihood == other.anomalyLikelihood
def __ne__(self, other):
return not self == other
@classmethod
def getSchema(cls):
return AnomalyLikelihood.getSchema()
@classmethod
def read(cls, proto):
anomalyLikelihoodRegion = object.__new__(cls)
anomalyLikelihoodRegion.anomalyLikelihood = AnomalyLikelihood.read(proto)
return anomalyLikelihoodRegion
def write(self, proto):
self.anomalyLikelihood.write(proto)
def initialize(self):
pass
def compute(self, inputs, outputs):
anomalyScore = inputs["rawAnomalyScore"][0]
value = inputs["metricValue"][0]
anomalyProbability = self.anomalyLikelihood.anomalyProbability(
value, anomalyScore)
outputs["anomalyLikelihood"][0] = anomalyProbability
| 4,973 | Python | .py | 133 | 29.007519 | 79 | 0.603445 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,895 | sdr_classifier_region.py | numenta_nupic-legacy/src/nupic/regions/sdr_classifier_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file implements the SDR Classifier region. See the comments in the class
definition of SDRClassifierRegion for a description.
"""
import warnings
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.algorithms.sdr_classifier_factory import SDRClassifierFactory
from nupic.support.configuration import Configuration
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.SDRClassifierRegion_capnp import SDRClassifierRegionProto
class SDRClassifierRegion(PyRegion):
  """
  SDRClassifierRegion implements a SDR classifier that accepts a binary
  input from the level below (the "activationPattern") and information from the
  sensor and encoders (the "classification") describing the input to the system
  at that time step.

  The SDR classifier maps input patterns to class labels. There are as many
  output units as the number of class labels or buckets (in the case of scalar
  encoders). The output is a probabilistic distribution over all class labels.

  During inference, the output is calculated by first doing a weighted summation
  of all the inputs, and then perform a softmax nonlinear function to get
  the predicted distribution of class labels

  During learning, the connection weights between input units and output units
  are adjusted to maximize the likelihood of the model

  The caller can choose to tell the region that the classifications for
  iteration N+K should be aligned with the activationPattern for iteration N.
  This results in the classifier producing predictions for K steps in advance.
  Any number of different K's can be specified, allowing the classifier to learn
  and infer multi-step predictions for a number of steps in advance.

  :param steps: (string) comma-separated list of prediction steps, default='1'
  :param alpha: (float) default=0.001
  :param verbosity: (int) How verbose to log, default=0
  :param implementation: (string) 'py' or 'cpp'; default=None (read from
         the nupic configuration)
  :param maxCategoryCount: (int) default=None
  """
  @classmethod
  def getSpec(cls):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
    """
    ns = dict(
        description=SDRClassifierRegion.__doc__,
        singleNodeOnly=True,
        inputs=dict(
            actValueIn=dict(
                description="Actual value of the field to predict. Only taken "
                            "into account if the input has no category field.",
                dataType="Real32",
                count=0,
                required=False,
                regionLevel=False,
                isDefaultInput=False,
                requireSplitterMap=False),
            bucketIdxIn=dict(
                description="Active index of the encoder bucket for the "
                            "actual value of the field to predict. Only taken "
                            "into account if the input has no category field.",
                dataType="UInt64",
                count=0,
                required=False,
                regionLevel=False,
                isDefaultInput=False,
                requireSplitterMap=False),
            categoryIn=dict(
                description='Vector of categories of the input sample',
                dataType='Real32',
                count=0,
                required=True,
                regionLevel=True,
                isDefaultInput=False,
                requireSplitterMap=False),
            bottomUpIn=dict(
                description='Belief values over children\'s groups',
                dataType='Real32',
                count=0,
                required=True,
                regionLevel=False,
                isDefaultInput=True,
                requireSplitterMap=False),
            predictedActiveCells=dict(
                description="The cells that are active and predicted",
                dataType='Real32',
                count=0,
                required=True,
                regionLevel=True,
                isDefaultInput=False,
                requireSplitterMap=False),
            sequenceIdIn=dict(
                description="Sequence ID",
                dataType='UInt64',
                count=1,
                required=False,
                regionLevel=True,
                isDefaultInput=False,
                requireSplitterMap=False),
        ),
        outputs=dict(
            categoriesOut=dict(
                description='Classification results',
                dataType='Real32',
                count=0,
                regionLevel=True,
                isDefaultOutput=False,
                requireSplitterMap=False),
            actualValues=dict(
                description='Classification results',
                dataType='Real32',
                count=0,
                regionLevel=True,
                isDefaultOutput=False,
                requireSplitterMap=False),
            probabilities=dict(
                description='Classification results',
                dataType='Real32',
                count=0,
                regionLevel=True,
                isDefaultOutput=False,
                requireSplitterMap=False),
        ),
        parameters=dict(
            learningMode=dict(
                description='Boolean (0/1) indicating whether or not a region '
                            'is in learning mode.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=1,
                accessMode='ReadWrite'),
            inferenceMode=dict(
                description='Boolean (0/1) indicating whether or not a region '
                            'is in inference mode.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='ReadWrite'),
            maxCategoryCount=dict(
                description='The maximal number of categories the '
                            'classifier will distinguish between.',
                dataType='UInt32',
                required=True,
                count=1,
                constraints='',
                # arbitrarily large value
                defaultValue=2000,
                accessMode='Create'),
            # NOTE(review): spec default '0' disagrees with the constructor
            # default of '1' (see __init__) -- confirm which is intended.
            steps=dict(
                description='Comma separated list of the desired steps of '
                            'prediction that the classifier should learn',
                dataType="Byte",
                count=0,
                constraints='',
                defaultValue='0',
                accessMode='Create'),
            alpha=dict(
                description='The alpha is the learning rate of the classifier.'
                            'lower alpha results in longer term memory and slower '
                            'learning',
                dataType="Real32",
                count=1,
                constraints='',
                defaultValue=0.001,
                accessMode='Create'),
            implementation=dict(
                description='The classifier implementation to use.',
                accessMode='ReadWrite',
                dataType='Byte',
                count=0,
                constraints='enum: py, cpp'),
            verbosity=dict(
                description='An integer that controls the verbosity level, '
                            '0 means no verbose output, increasing integers '
                            'provide more verbosity.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=0,
                accessMode='ReadWrite'),
        ),
        commands=dict()
    )
    return ns
  def __init__(self,
               steps='1',
               alpha=0.001,
               verbosity=0,
               implementation=None,
               maxCategoryCount=None
               ):
    # Set default implementation
    if implementation is None:
      implementation = Configuration.get(
        'nupic.opf.sdrClassifier.implementation')
    self.implementation = implementation
    # Convert the steps designation to a list.  self.steps is kept as the
    # original string so writeToProto() can store it verbatim.
    self.steps = steps
    self.stepsList = [int(i) for i in steps.split(",")]
    self.alpha = alpha
    self.verbosity = verbosity
    # Initialize internal structures.  The classifier itself is created
    # lazily in initialize().
    self._sdrClassifier = None
    self.learningMode = True
    self.inferenceMode = False
    self.maxCategoryCount = maxCategoryCount
    self.recordNum = 0
    # Flag to know if the compute() function is ever called. This is to
    # prevent backward compatibilities issues with the customCompute() method
    # being called at the same time as the compute() method. Only compute()
    # should be called via network.run(). This flag will be removed once we
    # get to cleaning up the htm_prediction_model.py file.
    self._computeFlag = False
  def initialize(self):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`.
    Is called once by NuPIC before the first call to compute().
    Initializes self._sdrClassifier if it is not already initialized.
    """
    if self._sdrClassifier is None:
      self._sdrClassifier = SDRClassifierFactory.create(
        steps=self.stepsList,
        alpha=self.alpha,
        verbosity=self.verbosity,
        implementation=self.implementation,
      )
  def getAlgorithmInstance(self):
    """
    :returns: the underlying SDR classifier instance created by
              :class:`~nupic.algorithms.sdr_classifier_factory.SDRClassifierFactory`
              (may be None before :meth:`initialize` has run).
    """
    return self._sdrClassifier
  def getParameter(self, name, index=-1):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.
    """
    # If any spec parameter name is the same as an attribute, this call
    # will get it automatically, e.g. self.learningMode
    return PyRegion.getParameter(self, name, index)
  def setParameter(self, name, index, value):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
    """
    # learningMode/inferenceMode arrive as 0/1 ints; normalize to bool.
    if name == "learningMode":
      self.learningMode = bool(int(value))
    elif name == "inferenceMode":
      self.inferenceMode = bool(int(value))
    else:
      return PyRegion.setParameter(self, name, index, value)
  @staticmethod
  def getSchema():
    """
    :returns: the pycapnp proto type that the class uses for serialization.
    """
    return SDRClassifierRegionProto
  def writeToProto(self, proto):
    """
    Write state to proto object.
    :param proto: SDRClassifierRegionProto capnproto object
    """
    proto.implementation = self.implementation
    proto.steps = self.steps
    proto.alpha = self.alpha
    proto.verbosity = self.verbosity
    proto.maxCategoryCount = self.maxCategoryCount
    proto.learningMode = self.learningMode
    proto.inferenceMode = self.inferenceMode
    proto.recordNum = self.recordNum
    self._sdrClassifier.write(proto.sdrClassifier)
  @classmethod
  def readFromProto(cls, proto):
    """
    Read state from proto object.
    :param proto: SDRClassifierRegionProto capnproto object
    """
    # cls() builds an instance with default constructor arguments; every
    # field is then overwritten from the proto below.
    instance = cls()
    instance.implementation = proto.implementation
    instance.steps = proto.steps
    instance.stepsList = [int(i) for i in proto.steps.split(",")]
    instance.alpha = proto.alpha
    instance.verbosity = proto.verbosity
    instance.maxCategoryCount = proto.maxCategoryCount
    instance._sdrClassifier = SDRClassifierFactory.read(proto)
    instance.learningMode = proto.learningMode
    instance.inferenceMode = proto.inferenceMode
    instance.recordNum = proto.recordNum
    return instance
  def compute(self, inputs, outputs):
    """
    Process one input sample.
    This method is called by the runtime engine.
    :param inputs: (dict) mapping region input names to numpy.array values
    :param outputs: (dict) mapping region output names to numpy.arrays that
           should be populated with output values by this method
    """
    # This flag helps to prevent double-computation, in case the deprecated
    # customCompute() method is being called in addition to compute() called
    # when network.run() is called
    self._computeFlag = True
    # The classifier trains/infers on the indices of the active input bits.
    patternNZ = inputs["bottomUpIn"].nonzero()[0]
    if self.learningMode:
      # An input can potentially belong to multiple categories.
      # If a category value is < 0, it means that the input does not belong to
      # that category.
      categories = [category for category in inputs["categoryIn"]
                    if category >= 0]
      if len(categories) > 0:
        # Allow to train on multiple input categories.
        bucketIdxList = []
        actValueList = []
        for category in categories:
          bucketIdxList.append(int(category))
          # Without an actValueIn link, the category index doubles as the
          # actual value.
          if "actValueIn" not in inputs:
            actValueList.append(int(category))
          else:
            actValueList.append(float(inputs["actValueIn"]))
        classificationIn = {"bucketIdx": bucketIdxList,
                            "actValue": actValueList}
      else:
        # If the input does not belong to a category, i.e. len(categories) == 0,
        # then look for bucketIdx and actValueIn.
        if "bucketIdxIn" not in inputs:
          raise KeyError("Network link missing: bucketIdxOut -> bucketIdxIn")
        if "actValueIn" not in inputs:
          raise KeyError("Network link missing: actValueOut -> actValueIn")
        classificationIn = {"bucketIdx": int(inputs["bucketIdxIn"]),
                            "actValue": float(inputs["actValueIn"])}
    else:
      # Use Dummy classification input, because this param is required even for
      # inference mode. Because learning is off, the classifier is not learning
      # this dummy input. Inference only here.
      classificationIn = {"actValue": 0, "bucketIdx": 0}
    # Perform inference if self.inferenceMode is True
    # Train classifier if self.learningMode is True
    clResults = self._sdrClassifier.compute(recordNum=self.recordNum,
                                            patternNZ=patternNZ,
                                            classification=classificationIn,
                                            learn=self.learningMode,
                                            infer=self.inferenceMode)
    # fill outputs with clResults
    if clResults is not None and len(clResults) > 0:
      outputs['actualValues'][:len(clResults["actualValues"])] = \
        clResults["actualValues"]
      for step in self.stepsList:
        stepIndex = self.stepsList.index(step)
        # The predicted category for this step is the actual value whose
        # bucket has the highest likelihood.
        categoryOut = clResults["actualValues"][clResults[step].argmax()]
        outputs['categoriesOut'][stepIndex] = categoryOut
        # Flatten the rest of the output. For example:
        #   Original dict  {1 : [0.1, 0.3, 0.2, 0.7]
        #                   4 : [0.2, 0.4, 0.3, 0.5]}
        #   becomes: [0.1, 0.3, 0.2, 0.7, 0.2, 0.4, 0.3, 0.5]
        stepProbabilities = clResults[step]
        # Pad the unused tail of each step's slot with zeros so stale values
        # from previous iterations never leak through.
        for categoryIndex in xrange(self.maxCategoryCount):
          flatIndex = categoryIndex + stepIndex * self.maxCategoryCount
          if categoryIndex < len(stepProbabilities):
            outputs['probabilities'][flatIndex] = \
              stepProbabilities[categoryIndex]
          else:
            outputs['probabilities'][flatIndex] = 0.0
    self.recordNum += 1
  def customCompute(self, recordNum, patternNZ, classification):
    """
    Just return the inference value from one input sample. The actual
    learning happens in compute() -- if, and only if learning is enabled --
    which is called when you run the network.

    .. warning:: This method is deprecated and exists only to maintain backward
       compatibility. This method is deprecated, and will be removed. Use
       :meth:`nupic.engine.Network.run` instead, which will call
       :meth:`~nupic.regions.sdr_classifier_region.compute`.

    :param recordNum: (int) Record number of the input sample.
    :param patternNZ: (list) of the active indices from the output below
    :param classification: (dict) of the classification information:

       * ``bucketIdx``: index of the encoder bucket
       * ``actValue``: actual value going into the encoder

    :returns: (dict) containing inference results, one entry for each step in
       ``self.steps``. The key is the number of steps, the value is an
       array containing the relative likelihood for each ``bucketIdx``
       starting from 0.

       For example:

       ::

         {'actualValues': [0.0, 1.0, 2.0, 3.0]
           1 : [0.1, 0.3, 0.2, 0.7]
           4 : [0.2, 0.4, 0.3, 0.5]}
    """
    # If the compute flag has not been initialized (for example if we
    # restored a model from an old checkpoint) initialize it to False.
    if not hasattr(self, "_computeFlag"):
      self._computeFlag = False
    if self._computeFlag:
      # Will raise an exception if the deprecated method customCompute() is
      # being used at the same time as the compute function.
      warnings.simplefilter('error', DeprecationWarning)
      warnings.warn("The customCompute() method should not be "
                    "called at the same time as the compute() "
                    "method. The compute() method is called "
                    "whenever network.run() is called.",
                    DeprecationWarning)
    return self._sdrClassifier.compute(recordNum,
                                       patternNZ,
                                       classification,
                                       self.learningMode,
                                       self.inferenceMode)
  def getOutputElementCount(self, outputName):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.
    """
    # One category slot per prediction step; probabilities are flattened as
    # (step, category) -> step * maxCategoryCount + category (see compute()).
    if outputName == "categoriesOut":
      return len(self.stepsList)
    elif outputName == "probabilities":
      return len(self.stepsList) * self.maxCategoryCount
    elif outputName == "actualValues":
      return self.maxCategoryCount
    else:
      raise ValueError("Unknown output {}.".format(outputName))
| 18,282 | Python | .py | 435 | 33.181609 | 85 | 0.64752 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,896 | test_region.py | numenta_nupic-legacy/src/nupic/regions/test_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This is a PyRegion-based python test regions for exploring/testing CLA Network
# mechanisms
from abc import ABCMeta, abstractmethod
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.data.dict_utils import DictObj
from nupic.serializable import Serializable
class RegionIdentityPolicyBase(object):
  """ A base class that must be subclassed by users in order to define the
  TestRegion instance's specialization. See also
  TestRegion.setIdentityPolicyInstance().

  A concrete policy supplies the region's actual behavior: its per-iteration
  compute logic, its output sizes, and its display name.
  """
  __metaclass__ = ABCMeta
  @abstractmethod
  def initialize(self, testRegionObj):
    """ Called from the scope of the region's PyRegion.initialize() method.

    testRegionObj: TestRegion instance with which this policy is
                    associated.
    """
  @abstractmethod
  def compute(self, inputs, outputs):
    """Perform the main computation.

    This method is called in each iteration for each phase the node supports.
    Called from the scope of the region's PyRegion.compute() method.

    inputs: dict of numpy arrays (one per input)
    outputs: dict of numpy arrays (one per output)
    """
  @abstractmethod
  def getOutputElementCount(self, name):
    """Return the number of elements in the given output of the region.

    Called from the scope of the region's PyRegion.getOutputElementCount()
    method.

    name: the name of the output
    """
  @abstractmethod
  def getName(self):
    """ Return the name of the region; used by TestRegion for debug/trace
    output.
    """
class TestRegion(PyRegion, Serializable):
  """
  TestRegion is designed for testing and exploration of CLA Network
  mechanisms. Each TestRegion instance takes on a specific role via
  an associated RegionIdentityPolicyBase-derived policy instance, which
  MUST be installed via setIdentityPolicyInstance() before the network runs.
  """

  # Maps each settable ephemeral log-path parameter name to the ephemeral
  # attribute that holds the corresponding open file object.
  _LOG_FILE_ATTRS = {
      "logPathInput": "_fpLogInput",
      "logPathOutput": "_fpLogOutput",
      "logPathOutputDense": "_fpLogOutputDense",
  }

  def __init__(self,
               **kwargs):
    # BUG FIX: this used to call super(PyRegion, self).__init__(**kwargs),
    # which starts MRO resolution *after* PyRegion and therefore never runs
    # PyRegion's initialization. Starting from TestRegion initializes the
    # full base-class chain (and is behavior-neutral if PyRegion defines no
    # __init__ of its own).
    super(TestRegion, self).__init__(**kwargs)

    # The specialization policy is what gives this region instance its
    # identity. Users set this via setIdentityPolicyInstance() before running
    # the network.
    self.identityPolicy = None

    # Debugging support, used in _conditionalBreak
    self.breakPdb = False
    self.breakKomodo = False

    # Construct ephemeral variables (those that aren't serialized)
    self.__constructEphemeralInstanceVars()

    return

  def __constructEphemeralInstanceVars(self):
    """ Initialize ephemeral instance variables (those that aren't serialized):
    the three log-file paths and their open file objects. These are rebuilt
    from scratch on deserialization rather than saved.
    """
    assert not hasattr(self, 'ephemeral')

    self.ephemeral = DictObj()

    self.ephemeral.logPathInput = ''
    self.ephemeral.logPathOutput = ''
    self.ephemeral.logPathOutputDense = ''
    self.ephemeral._fpLogInput = None
    self.ephemeral._fpLogOutput = None
    self.ephemeral._fpLogOutputDense = None

    return

  #############################################################################
  #
  # Initialization code
  #
  #############################################################################

  def initialize(self):
    """ Called by network after all links have been set up.

    Delegates to the installed identity policy, which must already have been
    set via setIdentityPolicyInstance().
    """
    self.identityPolicy.initialize(self)
    _debugOut(self.identityPolicy.getName())

  #############################################################################
  #
  # Core compute methods: learning, inference, and prediction
  #
  #############################################################################

  def compute(self, inputs, outputs):
    """
    Run one iteration of the region's compute by delegating to the identity
    policy.

    inputs: dict of numpy arrays (one per input)
    outputs: dict of numpy arrays (one per output)
    """
    # Uncomment this to find out who is generating divide by 0, or other numpy warnings
    # numpy.seterr(divide='raise', invalid='raise', over='raise')
    self.identityPolicy.compute(inputs, outputs)

    _debugOut(("%s: inputs=%s; outputs=%s") % \
              (self.identityPolicy.getName(), inputs, outputs))
    return

  #############################################################################
  #
  # NuPIC 2 Support
  #    These methods are required by NuPIC 2
  #
  #############################################################################

  def getOutputElementCount(self, name):
    """Return the number of elements in the named output, as defined by the
    identity policy."""
    return self.identityPolicy.getOutputElementCount(name)

  # TODO: as a temporary hack, getParameterArrayCount checks to see if there's a
  # variable, private or not, with that name. If so, it attempts to return the
  # length of that variable.
  def getParameterArrayCount(self, name, index):
    """Return the length of the array-valued parameter ``name``.

    Raises if the parameter's value has no length.
    """
    p = self.getParameter(name)
    if (not hasattr(p, '__len__')):
      raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
    return len(p)

  # TODO: as a temporary hack, getParameterArray checks to see if there's a
  # variable, private or not, with that name. If so, it returns the value of the
  # variable.
  def getParameterArray(self, name, index, a):
    """Copy the value of the array-valued parameter ``name`` into the
    caller-supplied array ``a``.

    Raises if the parameter's value has no length.
    """
    p = self.getParameter(name)
    if (not hasattr(p, '__len__')):
      raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)

    if len(p) > 0:
      a[:] = p[:]

    return

  #############################################################################
  #
  # Region API support methods: getSpec, getParameter, and setParameter
  #
  #############################################################################

  @classmethod
  def getSpec(cls):
    """Return the base Spec for TestRegion.
    """
    spec = dict(
      description="TestRegion",
      singleNodeOnly=True,
      inputs=dict(
        bottomUpIn=dict(
          description="""The input vector.""",
          dataType='Real32',
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=True,
          requireSplitterMap=False),
        topDownIn=dict(
          description="""The top-down input signal, generated from
                        feedback from upper levels""",
          dataType='Real32',
          count=0,
          required = False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),
      ),
      outputs=dict(
        bottomUpOut=dict(
          description="""The output signal generated from the bottom-up inputs
                          from lower levels.""",
          dataType='Real32',
          count=0,
          regionLevel=True,
          isDefaultOutput=True),
        topDownOut=dict(
          description="""The top-down output signal, generated from
                        feedback from upper levels""",
          dataType='Real32',
          count=0,
          regionLevel=True,
          isDefaultOutput=False),
      ),
      parameters=dict(
        logPathInput=dict(
          description='Optional name of input log file. If set, every input vector'
                      ' will be logged to this file.',
          accessMode='ReadWrite',
          dataType='Byte',
          count=0,
          constraints=''),
        logPathOutput=dict(
          description='Optional name of output log file. If set, every output vector'
                      ' will be logged to this file.',
          accessMode='ReadWrite',
          dataType='Byte',
          count=0,
          constraints=''),
        logPathOutputDense=dict(
          description='Optional name of output log file. If set, every output vector'
                      ' will be logged to this file as a dense vector.',
          accessMode='ReadWrite',
          dataType='Byte',
          count=0,
          constraints=''),
        breakPdb=dict(
          description='Set to 1 to stop in the pdb debugger on the next compute',
          dataType='UInt32',
          count=1,
          constraints='bool',
          defaultValue=0,
          accessMode='ReadWrite'),
        breakKomodo=dict(
          description='Set to 1 to stop in the Komodo debugger on the next compute',
          dataType='UInt32',
          count=1,
          constraints='bool',
          defaultValue=0,
          accessMode='ReadWrite'),
      ),
      commands=dict(
        # Typo fix: was "BERORE" in the user-facing description.
        setIdentityPolicyInstance=dict(description=
          "Set identity policy instance BEFORE running the network. " + \
          "The instance MUST be derived from TestRegion's " + \
          "RegionIdentityPolicyBase class."),
        getIdentityPolicyInstance=dict(description=
          "Returns identity policy instance that was associated with " + \
          "the TestRegion instance via the setIdentityPolicyInstance " + \
          "command."),
      )
    )

    return spec

  def getParameter(self, parameterName, index=-1):
    """
    Get the value of a Spec parameter. Most parameters are handled
    automatically by PyRegion's parameter get mechanism. The ones that need
    special treatment (the ephemeral log-path parameters) are explicitly
    handled here.
    """
    assert not (parameterName in self.__dict__ and
                parameterName in self.ephemeral)

    if parameterName in self.ephemeral:
      return self.ephemeral[parameterName]
    else:
      # BUG FIX: this used to call super(PyRegion, self).getParameter(...),
      # which resolves *past* PyRegion in the MRO to classes that do not
      # define getParameter, raising AttributeError for every non-ephemeral
      # parameter. Delegate to PyRegion's implementation instead.
      return super(TestRegion, self).getParameter(parameterName, index)

  def _setLogPathParameter(self, pathName, fpName, parameterValue):
    """Point the log identified by (pathName, fpName) at a new file.

    Closes any previously open log file, then opens the new path for writing
    when parameterValue is non-empty.
    """
    self.ephemeral[pathName] = parameterValue

    # Close any existing log file
    if self.ephemeral[fpName]:
      self.ephemeral[fpName].close()
      self.ephemeral[fpName] = None

    # Open a new log file
    if parameterValue:
      self.ephemeral[fpName] = open(parameterValue, 'w')

  def setParameter(self, parameterName, index, parameterValue):
    """
    Set the value of a Spec parameter. Most parameters are handled
    automatically by PyRegion's parameter set mechanism. The ones that need
    special treatment (the ephemeral log-path parameters) are explicitly
    handled here.
    """
    assert not (parameterName in self.__dict__ and
                parameterName in self.ephemeral)

    if parameterName in self.ephemeral:
      # Identical open/close handling for all three log paths (previously
      # three copy-pasted branches).
      fpName = self._LOG_FILE_ATTRS.get(parameterName)
      if fpName is None:
        raise Exception('Unknown parameter: ' + parameterName)
      self._setLogPathParameter(parameterName, fpName, parameterValue)

    return

  #############################################################################
  #
  # Commands
  #
  #############################################################################

  def setIdentityPolicyInstance(self, identityPolicyObj):
    """TestRegion command that sets identity policy instance. The instance
    MUST be derived from TestRegion's RegionIdentityPolicyBase class.

    Users MUST set the identity instance BEFORE running the network

    Exception: AssertionError if identity policy instance has already been set
                  or if the passed-in instance is not derived from
                  RegionIdentityPolicyBase.
    """
    assert not self.identityPolicy
    assert isinstance(identityPolicyObj, RegionIdentityPolicyBase)

    self.identityPolicy = identityPolicyObj

    return

  def getIdentityPolicyInstance(self):
    """TestRegion command that returns the identity policy instance that was
    associated with this TestRegion instance via setIdentityPolicyInstance().

    Returns: a RegionIdentityPolicyBase-based instance that was associated with
              this TestRegion intstance.

    Exception: AssertionError if no identity policy instance has been set.
    """
    assert self.identityPolicy

    return self.identityPolicy

  #############################################################################
  #
  # Methods to support serialization
  #
  #############################################################################

  def getSchema(self):
    # NOTE(review): TestRegionProto is not defined or imported anywhere in
    # this module -- confirm the capnp schema import exists at runtime.
    return TestRegionProto

  def write(self, proto):
    """Save the region's state.

    The ephemerals and identity policy are excluded from the saved state.

    :param proto: an instance of TestRegionProto to serialize
    """
    proto.breakPdb = self.breakPdb
    proto.breakKomodo = self.breakKomodo

  def read(self, proto):
    """Load the state from the given proto instance.

    The saved state does not include the identity policy so this must be
    constructed and set after the region is deserialized. This can be done by
    calling 'setIdentityPolicyInstance'.

    :param proto: an instance of TestRegionProto to load state from
    """
    self.breakPdb = proto.breakPdb
    self.breakKomodo = proto.breakKomodo
    self.__constructEphemeralInstanceVars()

  def __getstate__(self):
    """
    Return serializable state.  This function will return a version of the
    __dict__ with all "ephemeral" members stripped out.  "Ephemeral" members
    are defined as those that do not need to be (nor should be) stored
    in any kind of persistent file (e.g., NuPIC network XML file.)
    """
    state = self.__dict__.copy()
    # Don't serialize ephemeral data
    state.pop('ephemeral')

    return state

  def __setstate__(self, state):
    """
    Set the state of ourself from a serialized state.
    """
    assert 'ephemeral' not in state

    self.__dict__.update(state)

    # Initialize our ephemeral member variables
    self.__constructEphemeralInstanceVars()

    return

  #############################################################################
  #
  # Debugging support code
  #
  #############################################################################

  def _conditionalBreak(self):
    """Drop into a debugger if the corresponding break flag has been set."""
    if self.breakKomodo:
      import dbgp.client; dbgp.client.brk()
    if self.breakPdb:
      import pdb; pdb.set_trace()

    return
# Module-wide switch for the debug trace emitted by _debugOut().
g_debug = True

def _debugOut(msg):
  """Emit a trace line tagged with the calling function's name and line.

  Output goes to stdout (flushed immediately) and is suppressed entirely
  when the module-level g_debug flag is False.
  """
  import sys

  if not g_debug:
    return

  info = whois_callers_caller()
  line = "TEST_REGION (f=%s;line=%s): %s" % (info.function, info.lineno, msg)
  sys.stdout.write(line + "\n")
  sys.stdout.flush()
def whois_callers_caller():
  """Return an inspect Traceback record describing our caller's caller.

  Stack index 2 is used because: [0] is this function, [1] is whoever
  called us, and [2] is *their* caller.
  """
  import inspect

  grandparentFrame = inspect.stack()[2][0]
  return inspect.getframeinfo(grandparentFrame)
| 15,936 | Python | .py | 387 | 34.436693 | 99 | 0.629697 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,897 | __init__.py | numenta_nupic-legacy/src/nupic/regions/__init__.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 977 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,898 | knn_classifier_region.py | numenta_nupic-legacy/src/nupic/regions/knn_classifier_region.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the k Nearest Neighbor classifier region.
"""
import numpy
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.algorithms import knn_classifier
from nupic.bindings.math import Random
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.knn_classifier_region_capnp import KNNClassifierRegionProto
class KNNClassifierRegion(PyRegion):
"""
KNNClassifierRegion implements the k Nearest Neighbor classification
algorithm. By default it will implement vanilla 1-nearest neighbor using the
L2 (Euclidean) distance norm. There are options for using different norms as
well as various ways of sparsifying the input.
.. note:: categories are ints >= 0.
:param maxCategoryCount: (int)
:param bestPrototypeIndexCount: (int)
:param outputProbabilitiesByDist: (bool)
:param k: (int)
:param distanceNorm: (float)
:param distanceMethod: (string)
:param distThreshold: (float)
:param doBinarization: (bool)
:param inputThresh: (float)
:param useSparseMemory: (bool)
:param sparseThreshold: (float)
:param relativeThreshold: (bool)
:param winnerCount: (int)
:param acceptanceProbability: (float)
:param seed: (int)
:param doSphering: (bool)
:param SVDSampleCount: (int)
:param SVDDimCount: (int)
:param fractionOfMax: (int)
:param useAuxiliary: (int)
:param justUseAuxiliary: (int)
:param verbosity: (int)
:param replaceDuplicates: (bool)
:param cellsPerCol: (int)
:param maxStoredPatterns: (int)
:param minSparsity: (float)
"""
__VERSION__ = 1
@classmethod
def getSpec(cls):
  """
  Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
  """
  # Typos in several user-facing description strings fixed ('ONLUY',
  # stray 'colum' fragment, missing spaces at string-continuation joins).
  ns = dict(
      description=KNNClassifierRegion.__doc__,
      singleNodeOnly=True,
      inputs=dict(
          categoryIn=dict(
              description='Vector of zero or more category indices for this '
                          'input sample. -1 implies no category.',
              dataType='Real32',
              count=0,
              required=True,
              regionLevel=True,
              isDefaultInput=False,
              requireSplitterMap=False),
          bottomUpIn=dict(
              description='Belief values over children\'s groups',
              dataType='Real32',
              count=0,
              required=True,
              regionLevel=False,
              isDefaultInput=True,
              requireSplitterMap=False),
          partitionIn=dict(
              description='Partition ID of the input sample',
              dataType='Real32',
              count=0,
              required=True,
              regionLevel=True,
              isDefaultInput=False,
              requireSplitterMap=False),
          auxDataIn=dict(
              description='Auxiliary data from the sensor',
              dataType='Real32',
              count=0,
              required=False,
              regionLevel=True,
              isDefaultInput=False,
              requireSplitterMap=False)
      ),
      outputs=dict(
          categoriesOut=dict(
              description='A vector representing, for each category '
                          'index, the likelihood that the input to the node belongs '
                          'to that category based on the number of neighbors of '
                          'that category that are among the nearest K.',
              dataType='Real32',
              count=0,
              regionLevel=True,
              isDefaultOutput=True),
          bestPrototypeIndices=dict(
              description='A vector that lists, in descending order of '
                          'the match, the positions of the prototypes '
                          'that best match the input pattern.',
              dataType='Real32',
              count=0,
              regionLevel=True,
              isDefaultOutput=False),
          categoryProbabilitiesOut=dict(
              description='A vector representing, for each category '
                          'index, the probability that the input to the node belongs '
                          'to that category based on the distance to the nearest '
                          'neighbor of each category.',
              dataType='Real32',
              count=0,
              regionLevel=True,
              isDefaultOutput=True),
      ),
      parameters=dict(
          learningMode=dict(
              description='Boolean (0/1) indicating whether or not a region '
                          'is in learning mode.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=1,
              accessMode='ReadWrite'),
          inferenceMode=dict(
              description='Boolean (0/1) indicating whether or not a region '
                          'is in inference mode.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=0,
              accessMode='ReadWrite'),
          acceptanceProbability=dict(
              description='During learning, inputs are learned with '
                          'probability equal to this parameter. '
                          'If set to 1.0, the default, '
                          'all inputs will be considered '
                          '(subject to other tests).',
              dataType='Real32',
              count=1,
              constraints='',
              defaultValue=1.0,
              accessMode='ReadWrite'),  # and Create too
          confusion=dict(
              description='Confusion matrix accumulated during inference. '
                          'Reset with reset(). This is available to Python '
                          'client code only.',
              dataType='Handle',
              count=2,
              constraints='',
              defaultValue=None,
              accessMode='Read'),
          activeOutputCount=dict(
              description='The number of active elements in the '
                          '"categoriesOut" output.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=0,
              accessMode='Read'),
          categoryCount=dict(
              description='An integer indicating the number of '
                          'categories that have been learned',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=None,
              accessMode='Read'),
          patternCount=dict(
              description='Number of patterns learned by the classifier.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=None,
              accessMode='Read'),
          patternMatrix=dict(
              description='The actual patterns learned by the classifier, '
                          'returned as a matrix.',
              dataType='Handle',
              count=1,
              constraints='',
              defaultValue=None,
              accessMode='Read'),
          k=dict(
              description='The number of nearest neighbors to use '
                          'during inference.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=1,
              accessMode='Create'),
          maxCategoryCount=dict(
              description='The maximal number of categories the '
                          'classifier will distinguish between.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=2,
              accessMode='Create'),
          distanceNorm=dict(
              description='The norm to use for a distance metric (i.e., '
                          'the "p" in Lp-norm)',
              dataType='Real32',
              count=1,
              constraints='',
              defaultValue=2.0,
              accessMode='ReadWrite'),
          distanceMethod=dict(
              description='Method used to compute distances between inputs and '
                          'prototypes. Possible options are norm, rawOverlap, '
                          'pctOverlapOfLarger, and pctOverlapOfProto',
              dataType="Byte",
              count=0,
              constraints='enum: norm, rawOverlap, pctOverlapOfLarger, '
                          'pctOverlapOfProto, pctOverlapOfInput',
              defaultValue='norm',
              accessMode='ReadWrite'),
          outputProbabilitiesByDist=dict(
              description='If True, categoryProbabilitiesOut is the probability of '
                          'each category based on the distance to the nearest neighbor of '
                          'each category. If False, categoryProbabilitiesOut is the '
                          'percentage of neighbors among the top K that are of each category.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=0,
              accessMode='Create'),
          distThreshold=dict(
              description='Distance Threshold. If a pattern that '
                          'is less than distThreshold apart from '
                          'the input pattern already exists in the '
                          'KNN memory, then the input pattern is '
                          'not added to KNN memory.',
              dataType='Real32',
              count=1,
              constraints='',
              defaultValue=0.0,
              accessMode='ReadWrite'),
          inputThresh=dict(
              description='Input binarization threshold, used if '
                          '"doBinarization" is True.',
              dataType='Real32',
              count=1,
              constraints='',
              defaultValue=0.5,
              accessMode='Create'),
          doBinarization=dict(
              description='Whether or not to binarize the input vectors.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=0,
              accessMode='Create'),
          useSparseMemory=dict(
              description='A boolean flag that determines whether or '
                          'not the KNNClassifier will use sparse Memory',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=1,
              accessMode='Create'),
          minSparsity=dict(
              description="If useSparseMemory is set, only vectors with sparsity"
                          " >= minSparsity will be stored during learning. A value"
                          " of 0.0 implies all vectors will be stored. A value of"
                          " 0.1 implies only vectors with at least 10% sparsity"
                          " will be stored",
              dataType='Real32',
              count=1,
              constraints='',
              defaultValue=0.0,
              accessMode='ReadWrite'),
          sparseThreshold=dict(
              description='If sparse memory is used, input variables '
                          'whose absolute value is less than this '
                          'threshold will be stored as zero',
              dataType='Real32',
              count=1,
              constraints='',
              defaultValue=0.0,
              accessMode='Create'),
          relativeThreshold=dict(
              description='Whether to multiply sparseThreshold by max value '
                          ' in input',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=0,
              accessMode='Create'),
          winnerCount=dict(
              description='Only this many elements of the input are '
                          'stored. All elements are stored if 0.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=0,
              accessMode='Create'),
          doSphering=dict(
              description='A boolean indicating whether or not data should '
                          'be "sphered" (i.e. each dimension should be normalized such '
                          'that its mean and variance are zero and one, respectively.) This'
                          ' sphering normalization would be performed after all training '
                          'samples had been received but before inference was performed. '
                          'The dimension-specific normalization constants would then '
                          ' be applied to all future incoming vectors prior to performing '
                          ' conventional NN inference.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=0,
              accessMode='Create'),
          SVDSampleCount=dict(
              description='If not 0, carries out SVD transformation after '
                          'that many samples have been seen.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=0,
              accessMode='Create'),
          SVDDimCount=dict(
              description='Number of dimensions to keep after SVD if greater '
                          'than 0. If set to -1 it is considered unspecified. '
                          'If set to 0 it is consider "adaptive" and the number '
                          'is chosen automatically.',
              dataType='Int32',
              count=1,
              constraints='',
              defaultValue=-1,
              accessMode='Create'),
          fractionOfMax=dict(
              description='The smallest singular value which is retained '
                          'as a fraction of the largest singular value. This is '
                          'used only if SVDDimCount==0 ("adaptive").',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=0,
              accessMode='Create'),
          useAuxiliary=dict(
              description='Whether or not the classifier should use auxiliary '
                          'input data.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=0,
              accessMode='Create'),
          justUseAuxiliary=dict(
              description='Whether or not the classifier should ONLY use the '
                          'auxiliary input data.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=0,
              accessMode='Create'),
          verbosity=dict(
              description='An integer that controls the verbosity level, '
                          '0 means no verbose output, increasing integers '
                          'provide more verbosity.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=0,
              accessMode='ReadWrite'),
          keepAllDistances=dict(
              description='Whether to store all the protoScores in an array, '
                          'rather than just the ones for the last inference. '
                          'When this parameter is changed from True to False, '
                          'all the scores are discarded except for the most '
                          'recent one.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=None,
              accessMode='ReadWrite'),
          replaceDuplicates=dict(
              description='A boolean flag that determines whether or '
                          'not the KNNClassifier should replace duplicates '
                          'during learning. This should be on when online '
                          'learning.',
              dataType='UInt32',
              count=1,
              constraints='bool',
              defaultValue=None,
              accessMode='ReadWrite'),
          cellsPerCol=dict(
              description='If >= 1, we assume the input is organized into columns, '
                          'in the same manner as the temporal memory AND '
                          'whenever we store a new prototype, we only store the '
                          'start cell (first cell) in any column which is bursting.',
              dataType='UInt32',
              count=1,
              constraints='',
              defaultValue=0,
              accessMode='Create'),
          maxStoredPatterns=dict(
              description='Limits the maximum number of the training patterns '
                          'stored. When KNN learns in a fixed capacity mode, '
                          'the unused patterns are deleted once the number '
                          'of stored patterns is greater than maxStoredPatterns '
                          'columns. [-1 is no limit] ',
              dataType='Int32',
              count=1,
              constraints='',
              defaultValue=-1,
              accessMode='Create'),
      ),
      commands=dict()
  )
  return ns
def __init__(self,
             maxCategoryCount=0,
             bestPrototypeIndexCount=0,
             outputProbabilitiesByDist=False,
             k=1,
             distanceNorm=2.0,
             distanceMethod='norm',
             distThreshold=0.0,
             doBinarization=False,
             inputThresh=0.500,
             useSparseMemory=True,
             sparseThreshold=0.0,
             relativeThreshold=False,
             winnerCount=0,
             acceptanceProbability=1.0,
             seed=42,
             doSphering=False,
             SVDSampleCount=0,
             SVDDimCount=0,
             fractionOfMax=0,
             useAuxiliary=0,
             justUseAuxiliary=0,
             verbosity=0,
             replaceDuplicates=False,
             cellsPerCol=0,
             maxStoredPatterns=-1,
             minSparsity=0.0
             ):
  """Create the region; the parameters are documented in the class docstring.

  Note: the attribute set created here is mirrored by __getstate__ /
  _getEphemeralAttributes — keep them in sync when adding attributes.
  """
  self.version = KNNClassifierRegion.__VERSION__
  # Convert various arguments to match the expectation
  # of the KNNClassifier: 0 / -1 are "unset" sentinels in the region spec
  # and become None (or 'adaptive' for SVDDimCount == 0).
  if SVDSampleCount == 0:
    SVDSampleCount = None
  if SVDDimCount == -1:
    SVDDimCount = None
  elif SVDDimCount == 0:
    SVDDimCount = 'adaptive'
  if fractionOfMax == 0:
    fractionOfMax = None
  # Integer flags from the spec become booleans.
  if useAuxiliary == 0:
    useAuxiliary = False
  if justUseAuxiliary == 0:
    justUseAuxiliary = False
  # KNN Parameters — forwarded verbatim to KNNClassifier in _initEphemerals().
  self.knnParams = dict(
      k=k,
      distanceNorm=distanceNorm,
      distanceMethod=distanceMethod,
      distThreshold=distThreshold,
      doBinarization=doBinarization,
      binarizationThreshold=inputThresh,
      useSparseMemory=useSparseMemory,
      sparseThreshold=sparseThreshold,
      relativeThreshold=relativeThreshold,
      numWinners=winnerCount,
      numSVDSamples=SVDSampleCount,
      numSVDDims=SVDDimCount,
      fractionOfMax=fractionOfMax,
      verbosity=verbosity,
      replaceDuplicates=replaceDuplicates,
      cellsPerCol=cellsPerCol,
      maxStoredPatterns=maxStoredPatterns,
      minSparsity=minSparsity
      )
  # Initialize internal structures
  self.outputProbabilitiesByDist = outputProbabilitiesByDist
  self.learningMode = True
  self.inferenceMode = False
  self._epoch = 0
  self.acceptanceProbability = acceptanceProbability
  self._rgen = Random(seed)
  # 1x1 confusion matrix; compute() grows it as categories are seen.
  self.confusion = numpy.zeros((1, 1))
  self.keepAllDistances = False
  self._protoScoreCount = 0
  self._useAuxiliary = useAuxiliary
  self._justUseAuxiliary = justUseAuxiliary
  # Sphering normalization constants — computed in _finishSphering().
  self._doSphering = doSphering
  self._normOffset = None
  self._normScale = None
  self._samples = None
  self._labels = None
  # Debugging
  self.verbosity = verbosity
  # Tap (debug-logging) files; see enableTap()/disableTap().
  self._tapFileIn = None
  self._tapFileOut = None
  self._initEphemerals()
  self.maxStoredPatterns = maxStoredPatterns
  self.maxCategoryCount = maxCategoryCount
  self._bestPrototypeIndexCount = bestPrototypeIndexCount
def _getEphemeralAttributes(self):
  """Names of attributes excluded from serialized state (see __getstate__)."""
  return [
      '_firstComputeCall',
      '_accuracy',
      '_protoScores',
      '_categoryDistances',
  ]
def _initEphemerals(self):
  """(Re)create attributes that are never checkpointed.

  Called from __init__ and again after deserialization, since these
  members are stripped by __getstate__. Also instantiates the wrapped
  KNN classifier from self.knnParams.
  """
  self._firstComputeCall = True
  self._accuracy = None
  self._protoScores = None
  self._categoryDistances = None
  self._knn = knn_classifier.KNNClassifier(**self.knnParams)
  # Backward compatibility: older checkpoints may lack these attributes.
  for attrName in ('_partitions', '_useAuxiliary', '_doSphering',
                   '_scanInfo', '_protoScores'):
    if not hasattr(self, attrName):
      setattr(self, attrName, None)
def __setstate__(self, state):
  """Set state from serialized state.

  Handles pre-versioned checkpoints (no 'version' key) as well as
  version-1 checkpoints, whose KNN classifier state is stored under
  '_knn_state' (see __getstate__).

  :param state: (dict) previously serialized state to restore
  """
  if 'version' not in state:
    # Pre-versioned checkpoint: restore attributes verbatim.
    self.__dict__.update(state)
  elif state['version'] == 1:
    # Backward compatibility
    if "doSelfValidation" in state:
      state.pop("doSelfValidation")
    knnState = state['_knn_state']
    del state['_knn_state']
    self.__dict__.update(state)
    # Rebuild ephemerals (this also creates a fresh KNN classifier),
    # then restore the classifier's saved state into it.
    self._initEphemerals()
    self._knn.__setstate__(knnState)
  else:
    raise RuntimeError("Invalid KNNClassifierRegion version for __setstate__")
  # Set to current version
  self.version = KNNClassifierRegion.__VERSION__
def __getstate__(self):
  """Return picklable state: __dict__ minus ephemerals, plus KNN state."""
  state = dict(self.__dict__)
  # The wrapped classifier serializes itself; store its state separately.
  state['_knn_state'] = self._knn.__getstate__()
  del state['_knn']
  for ephemeralName in self._getEphemeralAttributes():
    del state[ephemeralName]
  return state
def initialize(self):
  """Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`.

  Nothing to do here — all setup happens in __init__/_initEphemerals.
  """
  pass
def _getActiveOutputCount(self):
  """Number of active elements in 'categoriesOut': max category index + 1."""
  categoryList = self._knn._categoryList
  if not categoryList:
    return 0
  return int(max(categoryList) + 1)
activeOutputCount = property(fget=_getActiveOutputCount)
def _getSeenCategoryCount(self):
  """Count of distinct categories learned so far."""
  distinctCategories = set(self._knn._categoryList)
  return len(distinctCategories)
categoryCount = property(fget=_getSeenCategoryCount)
def _getPatternMatrix(self):
  """Return the learned patterns: the dense matrix if present, else the
  classifier's memory store."""
  matrix = self._knn._M
  return matrix if matrix is not None else self._knn._Memory
def _getAccuracy(self):
  """Return (correct, total) sample counts from the confusion matrix."""
  n = self.confusion.shape[0]
  assert n == self.confusion.shape[1], "Confusion matrix is non-square."
  # Diagonal entries are the correctly classified samples.
  return self.confusion.trace(), self.confusion.sum()
accuracy = property(fget=_getAccuracy)
def clear(self):
  """Forget all stored patterns by clearing the underlying KNN classifier."""
  self._knn.clear()
def getAlgorithmInstance(self):
  """
  :returns: (:class:`~nupic.algorithms.knn_classifier.KNNClassifier`)

  Direct handle to the wrapped classifier; mutating it affects this region.
  """
  return self._knn
def getParameter(self, name, index=-1):
  """
  Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.
  """
  # Parameters backed by the wrapped KNN classifier or private attributes
  # are resolved through this dispatch table; anything else falls through
  # to the base class, which reads same-named instance attributes
  # (e.g. self.learningMode).
  getters = {
      "patternCount": lambda: self._knn._numPatterns,
      "patternMatrix": self._getPatternMatrix,
      "k": lambda: self._knn.k,
      "distanceNorm": lambda: self._knn.distanceNorm,
      "distanceMethod": lambda: self._knn.distanceMethod,
      "distThreshold": lambda: self._knn.distThreshold,
      "inputThresh": lambda: self._knn.binarizationThreshold,
      "doBinarization": lambda: self._knn.doBinarization,
      "useSparseMemory": lambda: self._knn.useSparseMemory,
      "sparseThreshold": lambda: self._knn.sparseThreshold,
      "winnerCount": lambda: self._knn.numWinners,
      "relativeThreshold": lambda: self._knn.relativeThreshold,
      # None means "unset" internally but is reported as 0 externally.
      "SVDSampleCount": lambda: (self._knn.numSVDSamples
                                 if self._knn.numSVDSamples is not None
                                 else 0),
      "SVDDimCount": lambda: (self._knn.numSVDDims
                              if self._knn.numSVDDims is not None
                              else 0),
      "fractionOfMax": lambda: (self._knn.fractionOfMax
                                if self._knn.fractionOfMax is not None
                                else 0),
      "useAuxiliary": lambda: self._useAuxiliary,
      "justUseAuxiliary": lambda: self._justUseAuxiliary,
      "doSphering": lambda: self._doSphering,
      "cellsPerCol": lambda: self._knn.cellsPerCol,
      "maxStoredPatterns": lambda: self.maxStoredPatterns,
      "categoryRecencyList": lambda: self._knn._categoryRecencyList,
  }
  getter = getters.get(name)
  if getter is not None:
    return getter()
  return PyRegion.getParameter(self, name, index)
def setParameter(self, name, index, value):
  """
  Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
  """
  if name == "learningMode":
    self.learningMode = bool(int(value))
    self._epoch = 0
  elif name == "inferenceMode":
    self._epoch = 0
    # Switching into inference mode finalizes learning (sphering, SVD, ...)
    # exactly once; see _finishLearning().
    if int(value) and not self.inferenceMode:
      self._finishLearning()
    self.inferenceMode = bool(int(value))
  elif name == "distanceNorm":
    self._knn.distanceNorm = value
  elif name == "distanceMethod":
    self._knn.distanceMethod = value
  elif name == "keepAllDistances":
    self.keepAllDistances = bool(value)
    if not self.keepAllDistances:
      # Discard all distances except the latest
      if self._protoScores is not None and self._protoScores.shape[0] > 1:
        self._protoScores = self._protoScores[-1,:]
      if self._protoScores is not None:
        self._protoScoreCount = 1
      else:
        self._protoScoreCount = 0
  elif name == "verbosity":
    # Keep the wrapped classifier's verbosity in sync with the region's.
    self.verbosity = value
    self._knn.verbosity = value
  else:
    return PyRegion.setParameter(self, name, index, value)
def reset(self):
  """
  Resets confusion matrix.
  """
  # Start from a 1x1 zero matrix; compute() grows it as categories appear.
  self.confusion = numpy.zeros((1, 1))
def doInference(self, activeInput):
  """Explicitly run inference on a vector that is passed in and return the
  inference vector (one score per category). Useful for debugging.

  :param activeInput: dense input vector to classify
  :returns: category inference vector from the KNN classifier
  """
  # KNNClassifier.infer() returns a 4-tuple (winner, inference,
  # protoScores, categoryDistances) — see the identical call in compute().
  # The previous 3-way unpacking raised ValueError on every call.
  winner, inference, protoScores, categoryDistances = \
      self._knn.infer(activeInput)
  return inference
def enableTap(self, tapPath):
  """
  Begin writing input/output tap files for debugging.

  :param tapPath: (string) base name of the tap files; '.in' and '.out'
      suffixes are appended.
  """
  self._tapFileIn = open("%s.in" % tapPath, 'w')
  self._tapFileOut = open("%s.out" % tapPath, 'w')
def disableTap(self):
  """
  Close any open tap files and stop tap logging.
  """
  for attrName in ('_tapFileIn', '_tapFileOut'):
    tapFile = getattr(self, attrName)
    if tapFile is not None:
      tapFile.close()
      setattr(self, attrName, None)
def handleLogInput(self, inputs):
  """
  Write input vectors to the '.in' tap file, one line per vector with
  space-separated values. No-op unless enableTap() has been called.

  :param inputs: (iter) input vectors to log.
  """
  if self._tapFileIn is not None:
    for input in inputs:
      for k in range(len(input)):
        # Python 2 print statement; trailing comma keeps values on one line.
        print >> self._tapFileIn, input[k],
      print >> self._tapFileIn
def handleLogOutput(self, output):
  """
  Write one output vector to the '.out' tap file as a line of
  space-separated values. No-op unless enableTap() has been called.

  :param output: (iter) output vector to log.
  """
  if self._tapFileOut is not None:
    for k in range(len(output)):
      # Python 2 print statement; trailing comma keeps values on one line.
      print >> self._tapFileOut, output[k],
    print >> self._tapFileOut
def _storeSample(self, inputVector, trueCatIndex, partition=0):
  """
  Buffer one training sample and its category label for later learning.

  Used only on the sphering path (_doSphering): samples accumulate here
  and are fed to the KNN classifier in _finishSphering() once the
  normalization constants are known.

  :param inputVector: dense sample vector
  :param trueCatIndex: (int) category label for this sample
  :param partition: partition ID; None is normalized to 0
  """
  # If this is the first sample, then allocate a numpy array
  # of the appropriate size in which to store all samples.
  # NOTE: the region's inputs are declared Real32 (and compute() asserts
  # numpy.float32 for aux data), so use numpy.float32 here; the previous
  # code referenced the undefined name RealNumpyDType, which raised
  # NameError on the first sphering sample.
  if self._samples is None:
    self._samples = numpy.zeros((0, len(inputVector)), dtype=numpy.float32)
    assert self._labels is None
    self._labels = []
  # Add the sample vector and category label
  self._samples = numpy.concatenate(
      (self._samples, numpy.atleast_2d(inputVector)), axis=0)
  self._labels += [trueCatIndex]
  # Add the partition ID
  if self._partitions is None:
    self._partitions = []
  if partition is None:
    partition = 0
  self._partitions += [partition]
def compute(self, inputs, outputs):
  """
  Process one input sample. This method is called by the runtime engine.

  .. note:: the number of input categories may vary, but the array size is
     fixed to the max number of categories allowed (by a lower region), so
     "unused" indices of the input category array are filled with -1s.

  TODO: confusion matrix does not support multi-label classification

  :param inputs: (dict) mapping region input names to numpy.array values
  :param outputs: (dict) mapping region output names to numpy.arrays that
         should be populated with output values by this method
  """
  # For backward compatibility (older checkpoints may have None here).
  if self._useAuxiliary is None:
    self._useAuxiliary = False
  # If the first time being called, then print potential warning messages
  if self._firstComputeCall:
    self._firstComputeCall = False
    if self._useAuxiliary:
      if self._justUseAuxiliary == True:
        print " Warning: You have chosen to ignore the image data and instead just use the auxiliary data stream."
  # Format inputs
  inputVector = inputs['bottomUpIn']
  # Look for auxiliary input
  if self._useAuxiliary==True:
    auxVector = inputs['auxDataIn']
    if auxVector.dtype != numpy.float32:
      raise RuntimeError, "KNNClassifierRegion expects numpy.float32 for the auxiliary data vector"
    if self._justUseAuxiliary == True:
      # Classify on the auxiliary stream alone.
      inputVector = inputs['auxDataIn']
    else:
      # Classify on the image data with the auxiliary stream appended.
      inputVector = numpy.concatenate([inputVector, inputs['auxDataIn']])
  # Logging (no-op unless tap files are enabled)
  self.handleLogInput([inputVector])
  # Read the category.
  assert "categoryIn" in inputs, "No linked category input."
  categories = inputs['categoryIn']
  # Read the partition ID.
  if "partitionIn" in inputs:
    assert len(inputs["partitionIn"]) == 1, "Must have exactly one link to partition input."
    partInput = inputs['partitionIn']
    assert len(partInput) == 1, "Partition input element count must be exactly 1."
    partition = int(partInput[0])
  else:
    partition = None
  # ---------------------------------------------------------------------
  # Inference (can be done simultaneously with learning)
  if self.inferenceMode:
    categoriesOut = outputs['categoriesOut']
    probabilitiesOut = outputs['categoryProbabilitiesOut']
    # If we are sphering, then apply normalization
    if self._doSphering:
      inputVector = (inputVector + self._normOffset) * self._normScale
    nPrototypes = 0
    if "bestPrototypeIndices" in outputs:
      bestPrototypeIndicesOut = outputs["bestPrototypeIndices"]
      nPrototypes = len(bestPrototypeIndicesOut)
    winner, inference, protoScores, categoryDistances = \
        self._knn.infer(inputVector, partitionId=partition)
    if not self.keepAllDistances:
      self._protoScores = protoScores
    else:
      # Keep all prototype scores in an array (one row per inference),
      # growing the buffer geometrically as needed.
      if self._protoScores is None:
        self._protoScores = numpy.zeros((1, protoScores.shape[0]),
                                        protoScores.dtype)
        self._protoScores[0,:] = protoScores
        self._protoScoreCount = 1
      else:
        if self._protoScoreCount == self._protoScores.shape[0]:
          # Double the size of the array
          newProtoScores = numpy.zeros((self._protoScores.shape[0] * 2,
                                        self._protoScores.shape[1]),
                                       self._protoScores.dtype)
          newProtoScores[:self._protoScores.shape[0],:] = self._protoScores
          self._protoScores = newProtoScores
        # Store the new prototype score
        self._protoScores[self._protoScoreCount,:] = protoScores
        self._protoScoreCount += 1
    self._categoryDistances = categoryDistances
    # --------------------------------------------------------------------
    # Compute the probability of each category
    if self.outputProbabilitiesByDist:
      scores = 1.0 - self._categoryDistances
    else:
      scores = inference
    # Probability is simply the scores/scores.sum()
    total = scores.sum()
    if total == 0:
      # Degenerate case: uniform distribution over categories.
      numScores = len(scores)
      probabilities = numpy.ones(numScores) / numScores
    else:
      probabilities = scores / total
    # -------------------------------------------------------------------
    # Fill the output vectors with our results
    nout = min(len(categoriesOut), len(inference))
    categoriesOut.fill(0)
    categoriesOut[0:nout] = inference[0:nout]
    probabilitiesOut.fill(0)
    probabilitiesOut[0:nout] = probabilities[0:nout]
    if self.verbosity >= 1:
      print "KNNRegion: categoriesOut: ", categoriesOut[0:nout]
      print "KNNRegion: probabilitiesOut: ", probabilitiesOut[0:nout]
    if self._scanInfo is not None:
      self._scanResults = [tuple(inference[:nout])]
    # Update the stored confusion matrix, growing it when a new category
    # index exceeds its current dimensions.
    for category in categories:
      if category >= 0:
        dims = max(int(category)+1, len(inference))
        oldDims = len(self.confusion)
        if oldDims < dims:
          confusion = numpy.zeros((dims, dims))
          confusion[0:oldDims, 0:oldDims] = self.confusion
          self.confusion = confusion
        self.confusion[inference.argmax(), int(category)] += 1
    # Calculate the best prototype indices
    if nPrototypes > 1:
      bestPrototypeIndicesOut.fill(0)
      if categoryDistances is not None:
        indices = categoryDistances.argsort()
        nout = min(len(indices), nPrototypes)
        bestPrototypeIndicesOut[0:nout] = indices[0:nout]
    elif nPrototypes == 1:
      if (categoryDistances is not None) and len(categoryDistances):
        bestPrototypeIndicesOut[0] = categoryDistances.argmin()
      else:
        bestPrototypeIndicesOut[0] = 0
    # Logging
    self.handleLogOutput(inference)
  # ---------------------------------------------------------------------
  # Learning mode
  if self.learningMode:
    # Randomly skip samples when acceptanceProbability < 1.
    if (self.acceptanceProbability < 1.0) and \
          (self._rgen.getReal64() > self.acceptanceProbability):
      pass
    else:
      # Accept the input
      for category in categories:
        if category >= 0:
          # category values of -1 are to be skipped (they are non-categories)
          if self._doSphering:
            # If we are sphering, then we can't provide the data to the KNN
            # library until we have computed per-dimension normalization
            # constants. So instead, we'll just store each training sample.
            self._storeSample(inputVector, category, partition)
          else:
            # Pass the raw training sample directly to the KNN library.
            self._knn.learn(inputVector, category, partition)
  self._epoch += 1
def getCategoryList(self):
  """
  Public API for returning the category list. This is a required API of the
  NearestNeighbor inspector.

  :returns: (list) which has one entry per stored prototype. The value of the
      entry is the category # of that stored prototype.
  """
  # Direct reference to the classifier's internal list (not a copy).
  return self._knn._categoryList
def removeCategory(self, categoryToRemove):
  """
  Removes a category from the underlying classifier.

  :param categoryToRemove: category to remove (per the class docstring,
      categories are ints >= 0 — confirm expected type against
      KNNClassifier.removeCategory)
  """
  return self._knn.removeCategory(categoryToRemove)
def getLatestDistances(self):
  """
  Full scores (distance to each stored prototype) from the most recent
  :meth:`compute` inference. This is a required API of the
  NearestNeighbor inspector.

  :returns: array with one entry per stored prototype (the distance of
      the most recently inferred input from that prototype), or None if
      no inference has run yet.
  """
  if self._protoScores is None:
    return None
  if self.keepAllDistances:
    # Rows accumulate one per inference; the newest is the last written.
    return self._protoScores[self._protoScoreCount - 1, :]
  return self._protoScores
def getAllDistances(self):
  """
  Like :meth:`getLatestDistances`, but returns every recorded score set
  (one row per inference) when more than one is available;
  :meth:`getLatestDistances` always returns just one set.

  :returns: 2-D array of all prototype distances recorded so far, or
      None if nothing has been recorded.
  """
  if self._protoScores is None:
    return None
  # Trim the geometrically grown buffer down to the rows actually filled.
  return self._protoScores[:self._protoScoreCount, :]
def calculateProbabilities(self):
  """Convert the last inference's category distances into probabilities."""
  # Closer prototypes (smaller distance) get higher scores.
  scores = 1.0 - self._categoryDistances
  total = scores.sum()
  if total == 0:
    # Degenerate case: fall back to a uniform distribution.
    numScores = len(scores)
    return numpy.ones(numScores) / numScores
  return scores / total
def _finishLearning(self):
  """Finalize learning: apply sphering (if enabled) and tell the wrapped
  KNN classifier that training is complete. Called when the region is
  switched into inference mode (see setParameter)."""
  if self._doSphering:
    self._finishSphering()
  self._knn.finishLearning()
  # Leave-one-out validation accuracy is not computed here; reset it.
  self._accuracy = None
def _finishSphering(self):
    """
    Compute per-dimension normalization constants from the collected
    training samples, normalize those samples in place so every input
    dimension has zero mean and unit variance, then feed the "sphered"
    samples into the underlying KNN model.
    """
    # Shift each dimension so that its mean becomes zero.
    self._normOffset = -self._samples.mean(axis=0)
    self._samples += self._normOffset
    # Scale each dimension to unit variance. Guard against dimensions
    # whose variance is exactly zero, which would otherwise divide by
    # zero below.
    variance = self._samples.var(axis=0)
    variance[variance == 0.0] = 1.0
    self._normScale = 1.0 / numpy.sqrt(variance)
    self._samples *= self._normScale
    # Replay every normalized sample into the underlying classifier.
    for idx, label in enumerate(self._labels):
      self._knn.learn(self._samples[idx], label, self._partitions[idx])
def _arraysToLists(self, samplesArray, labelsArray):
    """
    Convert a 2-D samples array and a labels array into plain Python
    lists, with sample values coerced to float.

    :returns: (samplesList, labelsList) tuple
    """
    # The inner float() loop already iterates each row, so the original's
    # intermediate list-of-lists pass is unnecessary.
    samplesList = [[float(value) for value in row] for row in samplesArray]
    return samplesList, list(labelsArray)
def getOutputElementCount(self, name):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.

    :param name: (string) output name
    :returns: (int) number of elements in the named output
    :raises Exception: for an unknown output name
    """
    # Both category outputs share the same width.
    if name in ('categoriesOut', 'categoryProbabilitiesOut'):
      return self.maxCategoryCount
    if name == 'bestPrototypeIndices':
      count = self._bestPrototypeIndexCount
      return count if count else 0
    raise Exception('Unknown output: ' + name)
@staticmethod
def getSchema():
    """
    :returns: the capnp schema class (``KNNClassifierRegionProto``) used
              to serialize/deserialize this region.
    """
    return KNNClassifierRegionProto
@classmethod
def readFromProto(cls, proto):
    """
    Deserialize a KNNClassifierRegion from a capnp proto reader; the
    inverse of :meth:`writeToProto`.

    :param proto: (KNNClassifierRegionProto reader) serialized region state
    :returns: (KNNClassifierRegion) reconstructed instance
    :raises RuntimeError: if the serialized version does not match
                          ``KNNClassifierRegion.__VERSION__``
    """
    if proto.version != KNNClassifierRegion.__VERSION__:
      raise RuntimeError("Invalid KNNClassifierRegion Version")
    # Bypass __init__; every attribute is restored explicitly below.
    instance = object.__new__(cls)
    instance.version = proto.version
    instance._knn = knn_classifier.KNNClassifier.read(proto.knn)
    instance.knnParams = proto.knnParams.to_dict()
    instance._rgen = Random()
    instance._rgen.read(proto.rgen)
    instance.verbosity = proto.verbosity
    instance._firstComputeCall = proto.firstComputeCall
    instance.keepAllDistances = proto.keepAllDistances
    instance.learningMode = proto.learningMode
    instance.inferenceMode = proto.inferenceMode
    instance._doSphering = proto.doSphering
    instance.outputProbabilitiesByDist = proto.outputProbabilitiesByDist
    instance._epoch = proto.epoch
    instance.maxStoredPatterns = proto.maxStoredPatterns
    instance.maxCategoryCount = proto.maxCategoryCount
    instance._bestPrototypeIndexCount = proto.bestPrototypeIndexCount
    instance.acceptanceProbability = proto.acceptanceProbability
    instance._useAuxiliary = proto.useAuxiliary
    instance._justUseAuxiliary = proto.justUseAuxiliary
    instance._protoScoreCount = proto.protoScoreCount
    instance.confusion = numpy.array(proto.confusion, dtype=numpy.int32)
    instance._normOffset = numpy.array(proto.normOffset, dtype=numpy.float32)
    instance._normScale = numpy.array(proto.normScale, dtype=numpy.float32)
    # BUG FIX: _samples was previously restored from proto.normScale and
    # _protoScores from proto.partitions -- copy/paste errors that silently
    # corrupted deserialized state. writeToProto stores these fields in
    # proto.samples and proto.protoScores respectively.
    instance._samples = numpy.array(proto.samples, dtype=numpy.float32)
    instance._labels = numpy.array(proto.labels, dtype=numpy.int32)
    instance._partitions = numpy.array(proto.partitions, dtype=numpy.int32)
    instance._protoScores = numpy.array(proto.protoScores, dtype=numpy.float32)
    instance._categoryDistances = numpy.array(proto.categoryDistances,
                                              dtype=numpy.float32)
    # Transient state is not serialized; reset it.
    instance._scanInfo = None
    instance._accuracy = None
    instance._tapFileIn = None
    instance._tapFileOut = None
    return instance
def writeToProto(self, proto):
    """
    Serialize this region's state into a capnp proto builder; the inverse
    of :meth:`readFromProto`.

    :param proto: (KNNClassifierRegionProto builder) target proto
    """
    proto.version = self.version
    # Convert 'NoneType' to zero. See 'getParameter'
    knnParams = self.knnParams.copy()
    v = knnParams["numSVDSamples"]
    knnParams["numSVDSamples"] = v if v is not None else 0
    v = knnParams["numSVDDims"]
    knnParams["numSVDDims"] = v if v is not None else 0
    v = knnParams["fractionOfMax"]
    knnParams["fractionOfMax"] = v if v is not None else 0
    # Convert type to capnp compatible: these params may arrive as ints
    # (0/1), but the schema declares them Bool.
    if "outputProbabilitiesByDist" in knnParams:
      knnParams["outputProbabilitiesByDist"] = bool(
        knnParams["outputProbabilitiesByDist"])
    if "doBinarization" in knnParams:
      knnParams["doBinarization"] = bool(knnParams["doBinarization"])
    if "useSparseMemory" in knnParams:
      knnParams["useSparseMemory"] = bool(knnParams["useSparseMemory"])
    if "relativeThreshold" in knnParams:
      knnParams["relativeThreshold"] = bool(knnParams["relativeThreshold"])
    if "doSphering" in knnParams:
      knnParams["doSphering"] = bool(knnParams["doSphering"])
    if "replaceDuplicates" in knnParams:
      knnParams["replaceDuplicates"] = bool(knnParams["replaceDuplicates"])
    proto.knnParams = knnParams
    # Nested components serialize themselves into their sub-protos.
    self._knn.write(proto.knn)
    self._rgen.write(proto.rgen)
    proto.verbosity = int(self.verbosity)
    proto.firstComputeCall = bool(self._firstComputeCall)
    proto.keepAllDistances = bool(self.keepAllDistances)
    proto.learningMode = bool(self.learningMode)
    proto.inferenceMode = bool(self.inferenceMode)
    proto.doSphering = bool(self._doSphering)
    proto.outputProbabilitiesByDist = bool(self.outputProbabilitiesByDist)
    proto.epoch = int(self._epoch)
    proto.maxStoredPatterns = int(self.maxStoredPatterns)
    proto.maxCategoryCount = int(self.maxCategoryCount)
    proto.bestPrototypeIndexCount = int(self._bestPrototypeIndexCount)
    proto.acceptanceProbability = float(self.acceptanceProbability)
    proto.useAuxiliary = bool(self._useAuxiliary)
    proto.justUseAuxiliary = bool(self._justUseAuxiliary)
    proto.protoScoreCount = int(self._protoScoreCount)
    # Arrays that may be None are simply omitted; the corresponding proto
    # fields are left at their empty defaults.
    if self.confusion is not None:
      proto.confusion = self.confusion.tolist()
    if self._normOffset is not None:
      proto.normOffset = self._normOffset.tolist()
    if self._normScale is not None:
      proto.normScale = self._normScale.tolist()
    if self._samples is not None:
      proto.samples = self._samples.tolist()
    if self._labels is not None:
      proto.labels = self._labels.tolist()
    if self._partitions is not None:
      proto.partitions = self._partitions.tolist()
    if self._protoScores is not None:
      proto.protoScores = self._protoScores.tolist()
    if self._categoryDistances is not None:
      proto.categoryDistances = self._categoryDistances.tolist()
if __name__=='__main__':
  # Smoke test: build a one-region network containing this classifier.
  from nupic.engine import Network
  n = Network()
  # Region parameters are passed as a YAML-style mapping string.
  classifier = n.addRegion(
      'classifier',
      'py.KNNClassifierRegion',
      '{ maxCategoryCount: 48, SVDSampleCount: 400, ' +
      ' SVDDimCount: 20, distanceNorm: 0.6 }')
| 45,579 | Python | .py | 1,115 | 31.570404 | 117 | 0.627755 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,899 | pluggable_encoder_sensor.py | numenta_nupic-legacy/src/nupic/regions/pluggable_encoder_sensor.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.bindings.regions.PyRegion import PyRegion
class PluggableEncoderSensor(PyRegion):
  """
  Holds a value and encodes it into network output.

  It requires you to reach in and insert an encoder:

  .. code-block:: python

    timestampSensor = network.addRegion("timestampSensor",
                                        'py.PluggableEncoderSensor', "")
    timestampSensor.getSelf().encoder = DateEncoder(timeOfDay=(21, 9.5),
                                                    name="timestamp_timeOfDay")
  """

  @classmethod
  def getSpec(cls):
    """Return the region spec: one Real32 'encoded' output, no parameters."""
    return {
      'singleNodeOnly': True,
      'description': PluggableEncoderSensor.__doc__,
      'outputs': {
        'encoded': {
          'description': '',
          'dataType': 'Real32',
          'count': 0,
          'regionLevel': True,
          'isDefaultOutput': True,
        }},
      'parameters': {},
    }

  def __init__(self, **kwargs):
    # We don't know the sensed value's type, so it's not a spec parameter.
    self._sensedValue = None

  def initialize(self):
    # Nothing to set up; the encoder is injected by the caller afterwards.
    pass

  def compute(self, inputs, outputs):
    """
    Encode the current sensed value into the 'encoded' output array.

    :raises Exception: if no encoder has been inserted yet
    """
    if self.encoder is None:
      raise Exception('Please insert an encoder.')
    # BUG FIX: a redundant self.encoder.encode() call whose result was
    # discarded has been removed; encodeIntoArray() does all the work.
    self.encoder.encodeIntoArray(self._sensedValue, outputs['encoded'])

  def getOutputElementCount(self, name):
    """
    :param name: (string) output name; only 'encoded' is supported
    :returns: (int) width of the inserted encoder
    :raises Exception: for an unrecognized output name
    """
    if name == 'encoded':
      return self.encoder.getWidth()
    else:
      raise Exception('Unrecognized output %s' % name)

  def getSensedValue(self):
    """
    :return: sensed value
    """
    return self._sensedValue

  def setSensedValue(self, value):
    """
    :param value: will be encoded when this region does a compute.
    """
    self._sensedValue = value

  def getParameter(self, parameterName, index=-1):
    # BUG FIX: this method compared an undefined name `parameter`,
    # raising NameError on every call; use the actual argument.
    if parameterName == 'sensedValue':
      raise Exception('For the PluggableEncoderSensor, get the sensedValue via the getSensedValue method')
    else:
      raise Exception('Unrecognized parameter %s' % parameterName)

  def setParameter(self, parameterName, index, parameterValue):
    # BUG FIX: same undefined-name bug as getParameter.
    if parameterName == 'sensedValue':
      raise Exception('For the PluggableEncoderSensor, set the sensedValue via the setSensedValue method')
    else:
      raise Exception('Unrecognized parameter %s' % parameterName)
| 3,255 | Python | .py | 81 | 34.654321 | 106 | 0.661382 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |