# File: TensorFlowTTS-master/tensorflow_tts/inference/auto_model.py
# -*- coding: utf-8 -*-
# Copyright 2020 The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Auto Model modules."""
import logging
import warnings
import os
import copy
from collections import OrderedDict
from tensorflow_tts.configs import (
FastSpeechConfig,
FastSpeech2Config,
MelGANGeneratorConfig,
MultiBandMelGANGeneratorConfig,
HifiGANGeneratorConfig,
Tacotron2Config,
ParallelWaveGANGeneratorConfig,
)
from tensorflow_tts.models import (
TFMelGANGenerator,
TFMBMelGANGenerator,
TFHifiGANGenerator,
TFParallelWaveGANGenerator,
)
from tensorflow_tts.inference.savable_models import (
SavableTFFastSpeech,
SavableTFFastSpeech2,
SavableTFTacotron2
)
from tensorflow_tts.utils import CACHE_DIRECTORY, MODEL_FILE_NAME, LIBRARY_NAME
from tensorflow_tts import __version__ as VERSION
from huggingface_hub import hf_hub_url, cached_download
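# Note (added): more specific config classes must precede their base classes
# in this mapping. For example, "MelGANGeneratorConfig" is a substring of
# "MultiBandMelGANGeneratorConfig", so the multi-band entry has to come first
# for the isinstance/name check in `from_pretrained` below to resolve it.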
TF_MODEL_MAPPING = OrderedDict(
[
(FastSpeech2Config, SavableTFFastSpeech2),
(FastSpeechConfig, SavableTFFastSpeech),
(MultiBandMelGANGeneratorConfig, TFMBMelGANGenerator),
(MelGANGeneratorConfig, TFMelGANGenerator),
(Tacotron2Config, SavableTFTacotron2),
(HifiGANGeneratorConfig, TFHifiGANGenerator),
(ParallelWaveGANGeneratorConfig, TFParallelWaveGANGenerator),
]
)
class TFAutoModel(object):
"""General model class for inferencing."""
def __init__(self):
raise EnvironmentError("Cannot be instantiated using `__init__()`")
@classmethod
def from_pretrained(cls, pretrained_path=None, config=None, **kwargs):
# load weights from hf hub
if pretrained_path is not None:
if not os.path.isfile(pretrained_path):
# retrieve correct hub url
download_url = hf_hub_url(repo_id=pretrained_path, filename=MODEL_FILE_NAME)
downloaded_file = str(
cached_download(
url=download_url,
library_name=LIBRARY_NAME,
library_version=VERSION,
cache_dir=CACHE_DIRECTORY,
)
)
# load config from repo as well
if config is None:
from tensorflow_tts.inference import AutoConfig
config = AutoConfig.from_pretrained(pretrained_path)
pretrained_path = downloaded_file
assert config is not None, "Please make sure to pass a config along to load a model from a local file"
for config_class, model_class in TF_MODEL_MAPPING.items():
if isinstance(config, config_class) and str(config_class.__name__) in str(
config
):
model = model_class(config=config, **kwargs)
model.set_config(config)
model._build()
if pretrained_path is not None and ".h5" in pretrained_path:
try:
model.load_weights(pretrained_path)
                    except Exception:  # fall back to by-name, skip-mismatch loading
model.load_weights(
pretrained_path, by_name=True, skip_mismatch=True
)
return model
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in TF_MODEL_MAPPING.keys()),
)
)
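# --- Added usage sketch; illustrative, not part of the original file. ---
# The Hub repo id and local paths below are assumptions for demonstration.
def _example_load_auto_model():
    from tensorflow_tts.inference import AutoConfig, TFAutoModel

    # From the Hub: the matching config is resolved automatically.
    hub_model = TFAutoModel.from_pretrained("tensorspeech/tts-fastspeech2-ljspeech-en")

    # From a local .h5 checkpoint: a config MUST be passed explicitly.
    config = AutoConfig.from_pretrained("./conf/fastspeech2.v1.yaml")
    local_model = TFAutoModel.from_pretrained("./model-150000.h5", config=config)
    return hub_model, local_model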
# File: TensorFlowTTS-master/tensorflow_tts/inference/savable_models.py
# -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Savable Model modules."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models import (
TFFastSpeech,
TFFastSpeech2,
TFMelGANGenerator,
TFMBMelGANGenerator,
TFHifiGANGenerator,
TFTacotron2,
TFParallelWaveGANGenerator,
)
class SavableTFTacotron2(TFTacotron2):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
def call(self, inputs, training=False):
input_ids, input_lengths, speaker_ids = inputs
return super().inference(input_ids, input_lengths, speaker_ids)
def _build(self):
input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9]], dtype=tf.int32)
input_lengths = tf.convert_to_tensor([9], dtype=tf.int32)
speaker_ids = tf.convert_to_tensor([0], dtype=tf.int32)
self([input_ids, input_lengths, speaker_ids])
class SavableTFFastSpeech(TFFastSpeech):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
def call(self, inputs, training=False):
input_ids, speaker_ids, speed_ratios = inputs
return super()._inference(input_ids, speaker_ids, speed_ratios)
def _build(self):
input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
speaker_ids = tf.convert_to_tensor([0], tf.int32)
speed_ratios = tf.convert_to_tensor([1.0], tf.float32)
self([input_ids, speaker_ids, speed_ratios])
class SavableTFFastSpeech2(TFFastSpeech2):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
def call(self, inputs, training=False):
input_ids, speaker_ids, speed_ratios, f0_ratios, energy_ratios = inputs
return super()._inference(
input_ids, speaker_ids, speed_ratios, f0_ratios, energy_ratios
)
def _build(self):
input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
speaker_ids = tf.convert_to_tensor([0], tf.int32)
speed_ratios = tf.convert_to_tensor([1.0], tf.float32)
f0_ratios = tf.convert_to_tensor([1.0], tf.float32)
energy_ratios = tf.convert_to_tensor([1.0], tf.float32)
self([input_ids, speaker_ids, speed_ratios, f0_ratios, energy_ratios])
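# --- Added usage sketch; illustrative, not part of the original file. ---
# The Savable* wrappers route `call` through the inference path, so a model
# can be exported as a SavedModel once its variables exist. The config and
# weight paths below are assumptions.
def _example_export_savedmodel():
    from tensorflow_tts.inference import AutoConfig

    config = AutoConfig.from_pretrained("./conf/fastspeech2.v1.yaml")
    model = SavableTFFastSpeech2(config=config)
    model._build()  # run a dummy forward pass to create the variables
    model.load_weights("./model-150000.h5")
    tf.saved_model.save(model, "./fastspeech2_savedmodel")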
# File: TensorFlowTTS-master/tensorflow_tts/inference/auto_config.py
# -*- coding: utf-8 -*-
# Copyright 2020 The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Auto Config modules."""
import logging
import yaml
import os
from collections import OrderedDict
from tensorflow_tts.configs import (
FastSpeechConfig,
FastSpeech2Config,
MelGANGeneratorConfig,
MultiBandMelGANGeneratorConfig,
HifiGANGeneratorConfig,
Tacotron2Config,
ParallelWaveGANGeneratorConfig,
)
from tensorflow_tts.utils import CACHE_DIRECTORY, CONFIG_FILE_NAME, LIBRARY_NAME
from tensorflow_tts import __version__ as VERSION
from huggingface_hub import hf_hub_url, cached_download
CONFIG_MAPPING = OrderedDict(
[
("fastspeech", FastSpeechConfig),
("fastspeech2", FastSpeech2Config),
("multiband_melgan_generator", MultiBandMelGANGeneratorConfig),
("melgan_generator", MelGANGeneratorConfig),
("hifigan_generator", HifiGANGeneratorConfig),
("tacotron2", Tacotron2Config),
("parallel_wavegan_generator", ParallelWaveGANGeneratorConfig),
]
)
class AutoConfig:
def __init__(self):
raise EnvironmentError(
"AutoConfig is designed to be instantiated "
"using the `AutoConfig.from_pretrained(pretrained_path)` method."
)
@classmethod
def from_pretrained(cls, pretrained_path, **kwargs):
# load weights from hf hub
if not os.path.isfile(pretrained_path):
# retrieve correct hub url
download_url = hf_hub_url(
repo_id=pretrained_path, filename=CONFIG_FILE_NAME
)
pretrained_path = str(
cached_download(
url=download_url,
library_name=LIBRARY_NAME,
library_version=VERSION,
cache_dir=CACHE_DIRECTORY,
)
)
with open(pretrained_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
try:
model_type = config["model_type"]
config_class = CONFIG_MAPPING[model_type]
config_class = config_class(**config[model_type + "_params"], **kwargs)
config_class.set_config_params(config)
return config_class
except Exception:
raise ValueError(
"Unrecognized config in {}. "
"Should have a `model_type` key in its config.yaml, or contain one of the following strings "
"in its name: {}".format(
pretrained_path, ", ".join(CONFIG_MAPPING.keys())
)
)
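# --- Added usage sketch; illustrative, not part of the original file. ---
# The yaml must carry a `model_type` key plus a matching "<model_type>_params"
# section; the path below is an assumption.
def _example_load_auto_config():
    config = AutoConfig.from_pretrained("./examples/tacotron2/conf/tacotron2.v1.yaml")
    print(type(config).__name__)  # e.g. "Tacotron2Config"
    return config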
# File: TensorFlowTTS-master/tensorflow_tts/inference/__init__.py
from tensorflow_tts.inference.auto_model import TFAutoModel
from tensorflow_tts.inference.auto_config import AutoConfig
from tensorflow_tts.inference.auto_processor import AutoProcessor
# File: TensorFlowTTS-master/tensorflow_tts/inference/auto_processor.py
# -*- coding: utf-8 -*-
# Copyright 2020 The TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Auto Processor modules."""
import logging
import json
import os
from collections import OrderedDict
from tensorflow_tts.processor import (
LJSpeechProcessor,
KSSProcessor,
BakerProcessor,
LibriTTSProcessor,
ThorstenProcessor,
LJSpeechUltimateProcessor,
SynpaflexProcessor,
JSUTProcessor,
)
from tensorflow_tts.utils import CACHE_DIRECTORY, PROCESSOR_FILE_NAME, LIBRARY_NAME
from tensorflow_tts import __version__ as VERSION
from huggingface_hub import hf_hub_url, cached_download
CONFIG_MAPPING = OrderedDict(
[
("LJSpeechProcessor", LJSpeechProcessor),
("KSSProcessor", KSSProcessor),
("BakerProcessor", BakerProcessor),
("LibriTTSProcessor", LibriTTSProcessor),
("ThorstenProcessor", ThorstenProcessor),
("LJSpeechUltimateProcessor", LJSpeechUltimateProcessor),
("SynpaflexProcessor", SynpaflexProcessor),
("JSUTProcessor", JSUTProcessor),
]
)
class AutoProcessor:
def __init__(self):
raise EnvironmentError(
"AutoProcessor is designed to be instantiated "
"using the `AutoProcessor.from_pretrained(pretrained_path)` method."
)
@classmethod
def from_pretrained(cls, pretrained_path, **kwargs):
# load weights from hf hub
if not os.path.isfile(pretrained_path):
# retrieve correct hub url
download_url = hf_hub_url(repo_id=pretrained_path, filename=PROCESSOR_FILE_NAME)
pretrained_path = str(
cached_download(
url=download_url,
library_name=LIBRARY_NAME,
library_version=VERSION,
cache_dir=CACHE_DIRECTORY,
)
)
with open(pretrained_path, "r") as f:
config = json.load(f)
try:
processor_name = config["processor_name"]
processor_class = CONFIG_MAPPING[processor_name]
processor_class = processor_class(
data_dir=None, loaded_mapper_path=pretrained_path
)
return processor_class
except Exception:
raise ValueError(
"Unrecognized processor in {}. "
"Should have a `processor_name` key in its config.json, or contain one of the following strings "
"in its name: {}".format(
pretrained_path, ", ".join(CONFIG_MAPPING.keys())
)
)
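# --- Added usage sketch; illustrative, not part of the original file. ---
# The Hub repo id is an assumption; processors expose `text_to_sequence`
# for mapping raw text to input ids.
def _example_text_to_ids():
    processor = AutoProcessor.from_pretrained("tensorspeech/tts-tacotron2-ljspeech-en")
    input_ids = processor.text_to_sequence("Hello world.")
    return input_ids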
# File: TensorFlowTTS-master/tensorflow_tts/losses/stft.py
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""STFT-based loss modules."""
import tensorflow as tf
class TFSpectralConvergence(tf.keras.layers.Layer):
"""Spectral convergence loss."""
def __init__(self):
"""Initialize."""
super().__init__()
def call(self, y_mag, x_mag):
"""Calculate forward propagation.
Args:
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
Returns:
Tensor: Spectral convergence loss value.
"""
return tf.norm(y_mag - x_mag, ord="fro", axis=(-2, -1)) / tf.norm(
y_mag, ord="fro", axis=(-2, -1)
)
class TFLogSTFTMagnitude(tf.keras.layers.Layer):
"""Log STFT magnitude loss module."""
def __init__(self):
"""Initialize."""
super().__init__()
def call(self, y_mag, x_mag):
"""Calculate forward propagation.
Args:
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
Returns:
            Tensor: Log STFT magnitude loss value.
"""
return tf.abs(tf.math.log(y_mag) - tf.math.log(x_mag))
class TFSTFT(tf.keras.layers.Layer):
"""STFT loss module."""
def __init__(self, frame_length=600, frame_step=120, fft_length=1024):
"""Initialize."""
super().__init__()
self.frame_length = frame_length
self.frame_step = frame_step
self.fft_length = fft_length
        self.spectral_convergence_loss = TFSpectralConvergence()
self.log_stft_magnitude_loss = TFLogSTFTMagnitude()
def call(self, y, x):
"""Calculate forward propagation.
Args:
y (Tensor): Groundtruth signal (B, T).
x (Tensor): Predicted signal (B, T).
Returns:
Tensor: Spectral convergence loss value (pre-reduce).
Tensor: Log STFT magnitude loss value (pre-reduce).
"""
x_mag = tf.abs(
tf.signal.stft(
signals=x,
frame_length=self.frame_length,
frame_step=self.frame_step,
fft_length=self.fft_length,
)
)
y_mag = tf.abs(
tf.signal.stft(
signals=y,
frame_length=self.frame_length,
frame_step=self.frame_step,
fft_length=self.fft_length,
)
)
# add small number to prevent nan value.
# compatible with pytorch version.
x_mag = tf.clip_by_value(tf.math.sqrt(x_mag ** 2 + 1e-7), 1e-7, 1e3)
y_mag = tf.clip_by_value(tf.math.sqrt(y_mag ** 2 + 1e-7), 1e-7, 1e3)
        sc_loss = self.spectral_convergence_loss(y_mag, x_mag)
mag_loss = self.log_stft_magnitude_loss(y_mag, x_mag)
return sc_loss, mag_loss
class TFMultiResolutionSTFT(tf.keras.layers.Layer):
"""Multi resolution STFT loss module."""
def __init__(
self,
fft_lengths=[1024, 2048, 512],
frame_lengths=[600, 1200, 240],
frame_steps=[120, 240, 50],
):
"""Initialize Multi resolution STFT loss module.
Args:
            frame_lengths (list): List of window lengths.
            frame_steps (list): List of hop sizes.
            fft_lengths (list): List of FFT sizes.
"""
super().__init__()
assert len(frame_lengths) == len(frame_steps) == len(fft_lengths)
self.stft_losses = []
for frame_length, frame_step, fft_length in zip(
frame_lengths, frame_steps, fft_lengths
):
self.stft_losses.append(TFSTFT(frame_length, frame_step, fft_length))
def call(self, y, x):
"""Calculate forward propagation.
Args:
y (Tensor): Groundtruth signal (B, T).
x (Tensor): Predicted signal (B, T).
Returns:
Tensor: Multi resolution spectral convergence loss value.
Tensor: Multi resolution log STFT magnitude loss value.
"""
sc_loss = 0.0
mag_loss = 0.0
for f in self.stft_losses:
sc_l, mag_l = f(y, x)
sc_loss += tf.reduce_mean(sc_l, axis=list(range(1, len(sc_l.shape))))
mag_loss += tf.reduce_mean(mag_l, axis=list(range(1, len(mag_l.shape))))
sc_loss /= len(self.stft_losses)
mag_loss /= len(self.stft_losses)
return sc_loss, mag_loss
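# --- Added usage sketch; illustrative, not part of the original file. ---
# Both returned losses have shape [B]; reduce them before optimization.
def _example_multi_resolution_stft_loss():
    y = tf.random.uniform([2, 16000])  # groundtruth waveforms (B, T)
    x = tf.random.uniform([2, 16000])  # predicted waveforms (B, T)
    stft_loss = TFMultiResolutionSTFT()
    sc_loss, mag_loss = stft_loss(y, x)
    return tf.reduce_mean(sc_loss + mag_loss)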
# File: TensorFlowTTS-master/tensorflow_tts/losses/spectrogram.py
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spectrogram-based loss modules."""
import tensorflow as tf
class TFMelSpectrogram(tf.keras.layers.Layer):
"""Mel Spectrogram loss."""
def __init__(
self,
n_mels=80,
f_min=80.0,
f_max=7600,
frame_length=1024,
frame_step=256,
fft_length=1024,
sample_rate=16000,
**kwargs
):
"""Initialize."""
super().__init__(**kwargs)
self.frame_length = frame_length
self.frame_step = frame_step
self.fft_length = fft_length
self.linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
n_mels, fft_length // 2 + 1, sample_rate, f_min, f_max
)
def _calculate_log_mels_spectrogram(self, signals):
"""Calculate forward propagation.
Args:
signals (Tensor): signal (B, T).
Returns:
Tensor: Mel spectrogram (B, T', 80)
"""
stfts = tf.signal.stft(
signals,
frame_length=self.frame_length,
frame_step=self.frame_step,
fft_length=self.fft_length,
)
linear_spectrograms = tf.abs(stfts)
mel_spectrograms = tf.tensordot(
linear_spectrograms, self.linear_to_mel_weight_matrix, 1
)
mel_spectrograms.set_shape(
linear_spectrograms.shape[:-1].concatenate(
self.linear_to_mel_weight_matrix.shape[-1:]
)
)
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6) # prevent nan.
return log_mel_spectrograms
def call(self, y, x):
"""Calculate forward propagation.
Args:
y (Tensor): Groundtruth signal (B, T).
x (Tensor): Predicted signal (B, T).
Returns:
Tensor: Mean absolute Error Spectrogram Loss.
"""
y_mels = self._calculate_log_mels_spectrogram(y)
x_mels = self._calculate_log_mels_spectrogram(x)
return tf.reduce_mean(
tf.abs(y_mels - x_mels), axis=list(range(1, len(x_mels.shape)))
)
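# --- Added usage sketch; illustrative, not part of the original file. ---
# `sample_rate`/`f_max` should match the training data; the 22050 Hz value
# below is an assumption.
def _example_mel_spectrogram_loss():
    mel_loss_fn = TFMelSpectrogram(sample_rate=22050, f_max=8000)
    y = tf.random.uniform([2, 22050])  # groundtruth waveforms (B, T)
    x = tf.random.uniform([2, 22050])  # predicted waveforms (B, T)
    return mel_loss_fn(y, x)  # per-example loss, shape [B]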
# File: TensorFlowTTS-master/tensorflow_tts/losses/__init__.py
from tensorflow_tts.losses.spectrogram import TFMelSpectrogram
from tensorflow_tts.losses.stft import TFMultiResolutionSTFT
# File: TensorFlowTTS-master/tensorflow_tts/trainers/base_trainer.py
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Based Trainer."""
import abc
import logging
import os
import tensorflow as tf
from tqdm import tqdm
from tensorflow_tts.optimizers import GradientAccumulator
from tensorflow_tts.utils import utils
class BasedTrainer(metaclass=abc.ABCMeta):
"""Customized trainer module for all models."""
def __init__(self, steps, epochs, config):
self.steps = steps
self.epochs = epochs
self.config = config
self.finish_train = False
self.writer = tf.summary.create_file_writer(config["outdir"])
self.train_data_loader = None
self.eval_data_loader = None
self.train_metrics = None
self.eval_metrics = None
self.list_metrics_name = None
def init_train_eval_metrics(self, list_metrics_name):
"""Init train and eval metrics to save it to tensorboard."""
self.train_metrics = {}
self.eval_metrics = {}
for name in list_metrics_name:
self.train_metrics.update(
{name: tf.keras.metrics.Mean(name="train_" + name, dtype=tf.float32)}
)
self.eval_metrics.update(
{name: tf.keras.metrics.Mean(name="eval_" + name, dtype=tf.float32)}
)
def reset_states_train(self):
"""Reset train metrics after save it to tensorboard."""
for metric in self.train_metrics.keys():
self.train_metrics[metric].reset_states()
def reset_states_eval(self):
"""Reset eval metrics after save it to tensorboard."""
for metric in self.eval_metrics.keys():
self.eval_metrics[metric].reset_states()
def update_train_metrics(self, dict_metrics_losses):
for name, value in dict_metrics_losses.items():
self.train_metrics[name].update_state(value)
def update_eval_metrics(self, dict_metrics_losses):
for name, value in dict_metrics_losses.items():
self.eval_metrics[name].update_state(value)
def set_train_data_loader(self, train_dataset):
"""Set train data loader (MUST)."""
self.train_data_loader = train_dataset
def get_train_data_loader(self):
"""Get train data loader."""
return self.train_data_loader
def set_eval_data_loader(self, eval_dataset):
"""Set eval data loader (MUST)."""
self.eval_data_loader = eval_dataset
def get_eval_data_loader(self):
"""Get eval data loader."""
return self.eval_data_loader
@abc.abstractmethod
def compile(self):
pass
@abc.abstractmethod
def create_checkpoint_manager(self, saved_path=None, max_to_keep=10):
"""Create checkpoint management."""
pass
def run(self):
"""Run training."""
self.tqdm = tqdm(
initial=self.steps, total=self.config["train_max_steps"], desc="[train]"
)
while True:
self._train_epoch()
if self.finish_train:
break
self.tqdm.close()
logging.info("Finish training.")
@abc.abstractmethod
def save_checkpoint(self):
"""Save checkpoint."""
pass
@abc.abstractmethod
def load_checkpoint(self, pretrained_path):
"""Load checkpoint."""
pass
def _train_epoch(self):
"""Train model one epoch."""
for train_steps_per_epoch, batch in enumerate(self.train_data_loader, 1):
# one step training
self._train_step(batch)
# check interval
self._check_log_interval()
self._check_eval_interval()
self._check_save_interval()
            # check whether training is finished
if self.finish_train:
return
# update
self.epochs += 1
self.train_steps_per_epoch = train_steps_per_epoch
logging.info(
f"(Steps: {self.steps}) Finished {self.epochs} epoch training "
f"({self.train_steps_per_epoch} steps per epoch)."
)
@abc.abstractmethod
def _eval_epoch(self):
"""One epoch evaluation."""
pass
@abc.abstractmethod
def _train_step(self, batch):
"""One step training."""
pass
@abc.abstractmethod
def _check_log_interval(self):
"""Save log interval."""
pass
@abc.abstractmethod
def fit(self):
pass
def _check_eval_interval(self):
"""Evaluation interval step."""
if self.steps % self.config["eval_interval_steps"] == 0:
self._eval_epoch()
def _check_save_interval(self):
"""Save interval checkpoint."""
if self.steps % self.config["save_interval_steps"] == 0:
self.save_checkpoint()
logging.info(f"Successfully saved checkpoint @ {self.steps} steps.")
def generate_and_save_intermediate_result(self, batch):
"""Generate and save intermediate result."""
pass
def _write_to_tensorboard(self, list_metrics, stage="train"):
"""Write variables to tensorboard."""
with self.writer.as_default():
for key, value in list_metrics.items():
tf.summary.scalar(stage + "/" + key, value.result(), step=self.steps)
self.writer.flush()
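# --- Added note; not part of the original file. ---
# Flow summary: run() loops _train_epoch(); each epoch iterates the train
# data loader, calling _train_step(batch) per batch followed by the log /
# eval / save interval checks. Concrete trainers implement the abstract
# hooks (compile, _train_step, _eval_epoch, checkpointing, ...).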
class GanBasedTrainer(BasedTrainer):
"""Customized trainer module for GAN TTS training (MelGAN, GAN-TTS, ParallelWaveGAN)."""
def __init__(
self,
steps,
epochs,
config,
strategy,
is_generator_mixed_precision=False,
is_discriminator_mixed_precision=False,
):
"""Initialize trainer.
Args:
steps (int): Initial global steps.
epochs (int): Initial global epochs.
config (dict): Config dict loaded from yaml format configuration file.
"""
super().__init__(steps, epochs, config)
self._is_generator_mixed_precision = is_generator_mixed_precision
self._is_discriminator_mixed_precision = is_discriminator_mixed_precision
self._strategy = strategy
self._already_apply_input_signature = False
self._generator_gradient_accumulator = GradientAccumulator()
self._discriminator_gradient_accumulator = GradientAccumulator()
self._generator_gradient_accumulator.reset()
self._discriminator_gradient_accumulator.reset()
def init_train_eval_metrics(self, list_metrics_name):
with self._strategy.scope():
super().init_train_eval_metrics(list_metrics_name)
def get_n_gpus(self):
return self._strategy.num_replicas_in_sync
def _get_train_element_signature(self):
return self.train_data_loader.element_spec
def _get_eval_element_signature(self):
return self.eval_data_loader.element_spec
def set_gen_model(self, generator_model):
"""Set generator class model (MUST)."""
self._generator = generator_model
def get_gen_model(self):
"""Get generator model."""
return self._generator
def set_dis_model(self, discriminator_model):
"""Set discriminator class model (MUST)."""
self._discriminator = discriminator_model
def get_dis_model(self):
"""Get discriminator model."""
return self._discriminator
def set_gen_optimizer(self, generator_optimizer):
"""Set generator optimizer (MUST)."""
self._gen_optimizer = generator_optimizer
if self._is_generator_mixed_precision:
self._gen_optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
self._gen_optimizer, "dynamic"
)
def get_gen_optimizer(self):
"""Get generator optimizer."""
return self._gen_optimizer
def set_dis_optimizer(self, discriminator_optimizer):
"""Set discriminator optimizer (MUST)."""
self._dis_optimizer = discriminator_optimizer
if self._is_discriminator_mixed_precision:
self._dis_optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
self._dis_optimizer, "dynamic"
)
def get_dis_optimizer(self):
"""Get discriminator optimizer."""
return self._dis_optimizer
def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
self.set_gen_model(gen_model)
self.set_dis_model(dis_model)
self.set_gen_optimizer(gen_optimizer)
self.set_dis_optimizer(dis_optimizer)
def _train_step(self, batch):
if self._already_apply_input_signature is False:
train_element_signature = self._get_train_element_signature()
eval_element_signature = self._get_eval_element_signature()
self.one_step_forward = tf.function(
self._one_step_forward, input_signature=[train_element_signature]
)
self.one_step_evaluate = tf.function(
self._one_step_evaluate, input_signature=[eval_element_signature]
)
self.one_step_predict = tf.function(
self._one_step_predict, input_signature=[eval_element_signature]
)
self._already_apply_input_signature = True
# run one_step_forward
self.one_step_forward(batch)
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
def _one_step_forward(self, batch):
per_replica_losses = self._strategy.run(
self._one_step_forward_per_replica, args=(batch,)
)
return self._strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
)
@abc.abstractmethod
def compute_per_example_generator_losses(self, batch, outputs):
"""Compute per example generator losses and return dict_metrics_losses
        Note that all elements of the loss MUST have a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input returned from the dataloader
            outputs: outputs of the model
Returns:
per_example_losses: per example losses for each GPU, shape [B]
dict_metrics_losses: dictionary loss.
"""
per_example_losses = 0.0
dict_metrics_losses = {}
return per_example_losses, dict_metrics_losses
@abc.abstractmethod
def compute_per_example_discriminator_losses(self, batch, gen_outputs):
"""Compute per example discriminator losses and return dict_metrics_losses
        Note that all elements of the loss MUST have a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input returned from the dataloader
            gen_outputs: outputs of the generator model
Returns:
per_example_losses: per example losses for each GPU, shape [B]
dict_metrics_losses: dictionary loss.
"""
per_example_losses = 0.0
dict_metrics_losses = {}
return per_example_losses, dict_metrics_losses
def _calculate_generator_gradient_per_batch(self, batch):
outputs = self._generator(**batch, training=True)
(
per_example_losses,
dict_metrics_losses,
) = self.compute_per_example_generator_losses(batch, outputs)
per_replica_gen_losses = tf.nn.compute_average_loss(
per_example_losses,
global_batch_size=self.config["batch_size"]
* self.get_n_gpus()
* self.config["gradient_accumulation_steps"],
)
if self._is_generator_mixed_precision:
scaled_per_replica_gen_losses = self._gen_optimizer.get_scaled_loss(
per_replica_gen_losses
)
if self._is_generator_mixed_precision:
scaled_gradients = tf.gradients(
scaled_per_replica_gen_losses, self._generator.trainable_variables
)
gradients = self._gen_optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = tf.gradients(
per_replica_gen_losses, self._generator.trainable_variables
)
# gradient accumulate for generator here
if self.config["gradient_accumulation_steps"] > 1:
self._generator_gradient_accumulator(gradients)
# accumulate loss into metrics
self.update_train_metrics(dict_metrics_losses)
if self.config["gradient_accumulation_steps"] == 1:
return gradients, per_replica_gen_losses
else:
return per_replica_gen_losses
def _calculate_discriminator_gradient_per_batch(self, batch):
(
per_example_losses,
dict_metrics_losses,
) = self.compute_per_example_discriminator_losses(
batch, self._generator(**batch, training=True)
)
per_replica_dis_losses = tf.nn.compute_average_loss(
per_example_losses,
global_batch_size=self.config["batch_size"]
* self.get_n_gpus()
* self.config["gradient_accumulation_steps"],
)
if self._is_discriminator_mixed_precision:
scaled_per_replica_dis_losses = self._dis_optimizer.get_scaled_loss(
per_replica_dis_losses
)
if self._is_discriminator_mixed_precision:
scaled_gradients = tf.gradients(
scaled_per_replica_dis_losses,
self._discriminator.trainable_variables,
)
gradients = self._dis_optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = tf.gradients(
per_replica_dis_losses, self._discriminator.trainable_variables
)
# accumulate loss into metrics
self.update_train_metrics(dict_metrics_losses)
# gradient accumulate for discriminator here
if self.config["gradient_accumulation_steps"] > 1:
self._discriminator_gradient_accumulator(gradients)
if self.config["gradient_accumulation_steps"] == 1:
return gradients, per_replica_dis_losses
else:
return per_replica_dis_losses
def _one_step_forward_per_replica(self, batch):
per_replica_gen_losses = 0.0
per_replica_dis_losses = 0.0
if self.config["gradient_accumulation_steps"] == 1:
(
gradients,
per_replica_gen_losses,
) = self._calculate_generator_gradient_per_batch(batch)
self._gen_optimizer.apply_gradients(
zip(gradients, self._generator.trainable_variables)
)
else:
            # gradient accumulation here.
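            # NOTE (added): with accumulation enabled, the replica batch holds
            # batch_size * gradient_accumulation_steps examples; each loop
            # iteration slices out micro-batch i and accumulates its gradients
            # before the single optimizer step applied after the loop.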
for i in tf.range(self.config["gradient_accumulation_steps"]):
reduced_batch = {
k: v[
i
* self.config["batch_size"] : (i + 1)
* self.config["batch_size"]
]
for k, v in batch.items()
}
# run 1 step accumulate
reduced_batch_losses = self._calculate_generator_gradient_per_batch(
reduced_batch
)
# sum per_replica_losses
per_replica_gen_losses += reduced_batch_losses
gradients = self._generator_gradient_accumulator.gradients
self._gen_optimizer.apply_gradients(
zip(gradients, self._generator.trainable_variables)
)
self._generator_gradient_accumulator.reset()
# one step discriminator
# recompute y_hat after 1 step generator for discriminator training.
if self.steps >= self.config["discriminator_train_start_steps"]:
if self.config["gradient_accumulation_steps"] == 1:
(
gradients,
per_replica_dis_losses,
) = self._calculate_discriminator_gradient_per_batch(batch)
self._dis_optimizer.apply_gradients(
zip(gradients, self._discriminator.trainable_variables)
)
else:
                # gradient accumulation here.
for i in tf.range(self.config["gradient_accumulation_steps"]):
reduced_batch = {
k: v[
i
* self.config["batch_size"] : (i + 1)
* self.config["batch_size"]
]
for k, v in batch.items()
}
# run 1 step accumulate
reduced_batch_losses = (
self._calculate_discriminator_gradient_per_batch(reduced_batch)
)
# sum per_replica_losses
per_replica_dis_losses += reduced_batch_losses
gradients = self._discriminator_gradient_accumulator.gradients
self._dis_optimizer.apply_gradients(
zip(gradients, self._discriminator.trainable_variables)
)
self._discriminator_gradient_accumulator.reset()
return per_replica_gen_losses + per_replica_dis_losses
def _eval_epoch(self):
"""Evaluate model one epoch."""
logging.info(f"(Steps: {self.steps}) Start evaluation.")
# calculate loss for each batch
for eval_steps_per_epoch, batch in enumerate(
tqdm(self.eval_data_loader, desc="[eval]"), 1
):
# eval one step
self.one_step_evaluate(batch)
if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]:
                # save intermediate results
self.generate_and_save_intermediate_result(batch)
logging.info(
f"(Steps: {self.steps}) Finished evaluation "
f"({eval_steps_per_epoch} steps per epoch)."
)
# average loss
for key in self.eval_metrics.keys():
logging.info(
f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}."
)
# record
self._write_to_tensorboard(self.eval_metrics, stage="eval")
# reset
self.reset_states_eval()
def _one_step_evaluate_per_replica(self, batch):
################################################
# one step generator.
outputs = self._generator(**batch, training=False)
_, dict_metrics_losses = self.compute_per_example_generator_losses(
batch, outputs
)
# accumulate loss into metrics
self.update_eval_metrics(dict_metrics_losses)
################################################
# one step discriminator
if self.steps >= self.config["discriminator_train_start_steps"]:
_, dict_metrics_losses = self.compute_per_example_discriminator_losses(
batch, outputs
)
# accumulate loss into metrics
self.update_eval_metrics(dict_metrics_losses)
################################################
def _one_step_evaluate(self, batch):
self._strategy.run(self._one_step_evaluate_per_replica, args=(batch,))
def _one_step_predict_per_replica(self, batch):
outputs = self._generator(**batch, training=False)
return outputs
def _one_step_predict(self, batch):
outputs = self._strategy.run(self._one_step_predict_per_replica, args=(batch,))
return outputs
@abc.abstractmethod
def generate_and_save_intermediate_result(self, batch):
return
def create_checkpoint_manager(self, saved_path=None, max_to_keep=10):
"""Create checkpoint management."""
if saved_path is None:
saved_path = self.config["outdir"] + "/checkpoints/"
os.makedirs(saved_path, exist_ok=True)
self.saved_path = saved_path
self.ckpt = tf.train.Checkpoint(
steps=tf.Variable(1),
epochs=tf.Variable(1),
gen_optimizer=self.get_gen_optimizer(),
dis_optimizer=self.get_dis_optimizer(),
)
self.ckp_manager = tf.train.CheckpointManager(
self.ckpt, saved_path, max_to_keep=max_to_keep
)
def save_checkpoint(self):
"""Save checkpoint."""
self.ckpt.steps.assign(self.steps)
self.ckpt.epochs.assign(self.epochs)
self.ckp_manager.save(checkpoint_number=self.steps)
utils.save_weights(
self._generator,
self.saved_path + "generator-{}.h5".format(self.steps)
)
utils.save_weights(
self._discriminator,
self.saved_path + "discriminator-{}.h5".format(self.steps)
)
def load_checkpoint(self, pretrained_path):
"""Load checkpoint."""
self.ckpt.restore(pretrained_path)
self.steps = self.ckpt.steps.numpy()
self.epochs = self.ckpt.epochs.numpy()
self._gen_optimizer = self.ckpt.gen_optimizer
# re-assign iterations (global steps) for gen_optimizer.
self._gen_optimizer.iterations.assign(tf.cast(self.steps, tf.int64))
# re-assign iterations (global steps) for dis_optimizer.
try:
discriminator_train_start_steps = self.config[
"discriminator_train_start_steps"
]
discriminator_train_start_steps = tf.math.maximum(
0, self.steps - discriminator_train_start_steps
)
except Exception:
discriminator_train_start_steps = self.steps
self._dis_optimizer = self.ckpt.dis_optimizer
self._dis_optimizer.iterations.assign(
tf.cast(discriminator_train_start_steps, tf.int64)
)
# load weights.
utils.load_weights(
self._generator,
self.saved_path + "generator-{}.h5".format(self.steps)
)
utils.load_weights(
self._discriminator,
self.saved_path + "discriminator-{}.h5".format(self.steps)
)
def _check_train_finish(self):
"""Check training finished."""
if self.steps >= self.config["train_max_steps"]:
self.finish_train = True
if (
self.steps != 0
and self.steps == self.config["discriminator_train_start_steps"]
):
self.finish_train = True
            logging.info(
                f"Finished training only generator at {self.steps} steps, "
                f"please resume and continue training."
            )
def _check_log_interval(self):
"""Log to tensorboard."""
if self.steps % self.config["log_interval_steps"] == 0:
for metric_name in self.list_metrics_name:
logging.info(
f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}."
)
self._write_to_tensorboard(self.train_metrics, stage="train")
# reset
self.reset_states_train()
def fit(self, train_data_loader, valid_data_loader, saved_path, resume=None):
self.set_train_data_loader(train_data_loader)
self.set_eval_data_loader(valid_data_loader)
self.train_data_loader = self._strategy.experimental_distribute_dataset(
self.train_data_loader
)
self.eval_data_loader = self._strategy.experimental_distribute_dataset(
self.eval_data_loader
)
with self._strategy.scope():
self.create_checkpoint_manager(saved_path=saved_path, max_to_keep=10000)
            if resume is not None and len(resume) > 1:
self.load_checkpoint(resume)
logging.info(f"Successfully resumed from {resume}.")
self.run()
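# --- Added note; not part of the original file. ---
# Scheduling: the generator trains from step 0, while discriminator updates
# only begin once self.steps >= config["discriminator_train_start_steps"],
# as checked in _one_step_forward_per_replica above.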
class Seq2SeqBasedTrainer(BasedTrainer, metaclass=abc.ABCMeta):
"""Customized trainer module for Seq2Seq TTS training (Tacotron, FastSpeech)."""
def __init__(
self, steps, epochs, config, strategy, is_mixed_precision=False,
):
"""Initialize trainer.
Args:
steps (int): Initial global steps.
epochs (int): Initial global epochs.
config (dict): Config dict loaded from yaml format configuration file.
strategy (tf.distribute): Strategy for distributed training.
is_mixed_precision (bool): Use mixed_precision training or not.
"""
super().__init__(steps, epochs, config)
self._is_mixed_precision = is_mixed_precision
self._strategy = strategy
self._model = None
self._optimizer = None
self._trainable_variables = None
# check if we already apply input_signature for train_step.
self._already_apply_input_signature = False
# create gradient accumulator
self._gradient_accumulator = GradientAccumulator()
self._gradient_accumulator.reset()
def init_train_eval_metrics(self, list_metrics_name):
with self._strategy.scope():
super().init_train_eval_metrics(list_metrics_name)
def set_model(self, model):
"""Set generator class model (MUST)."""
self._model = model
def get_model(self):
"""Get generator model."""
return self._model
def set_optimizer(self, optimizer):
"""Set optimizer (MUST)."""
self._optimizer = optimizer
if self._is_mixed_precision:
self._optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
self._optimizer, "dynamic"
)
def get_optimizer(self):
"""Get optimizer."""
return self._optimizer
def get_n_gpus(self):
return self._strategy.num_replicas_in_sync
def compile(self, model, optimizer):
self.set_model(model)
self.set_optimizer(optimizer)
self._trainable_variables = self._train_vars()
def _train_vars(self):
if self.config["var_train_expr"]:
list_train_var = self.config["var_train_expr"].split("|")
return [
v
for v in self._model.trainable_variables
if self._check_string_exist(list_train_var, v.name)
]
return self._model.trainable_variables
def _check_string_exist(self, list_string, inp_string):
for string in list_string:
if string in inp_string:
return True
return False
def _get_train_element_signature(self):
return self.train_data_loader.element_spec
def _get_eval_element_signature(self):
return self.eval_data_loader.element_spec
def _train_step(self, batch):
if self._already_apply_input_signature is False:
train_element_signature = self._get_train_element_signature()
eval_element_signature = self._get_eval_element_signature()
self.one_step_forward = tf.function(
self._one_step_forward, input_signature=[train_element_signature]
)
self.one_step_evaluate = tf.function(
self._one_step_evaluate, input_signature=[eval_element_signature]
)
self.one_step_predict = tf.function(
self._one_step_predict, input_signature=[eval_element_signature]
)
self._already_apply_input_signature = True
# run one_step_forward
self.one_step_forward(batch)
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
def _one_step_forward(self, batch):
per_replica_losses = self._strategy.run(
self._one_step_forward_per_replica, args=(batch,)
)
return self._strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
)
def _calculate_gradient_per_batch(self, batch):
outputs = self._model(**batch, training=True)
per_example_losses, dict_metrics_losses = self.compute_per_example_losses(
batch, outputs
)
per_replica_losses = tf.nn.compute_average_loss(
per_example_losses,
global_batch_size=self.config["batch_size"]
* self.get_n_gpus()
* self.config["gradient_accumulation_steps"],
)
if self._is_mixed_precision:
scaled_per_replica_losses = self._optimizer.get_scaled_loss(
per_replica_losses
)
if self._is_mixed_precision:
scaled_gradients = tf.gradients(
scaled_per_replica_losses, self._trainable_variables
)
gradients = self._optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = tf.gradients(per_replica_losses, self._trainable_variables)
# gradient accumulate here
if self.config["gradient_accumulation_steps"] > 1:
self._gradient_accumulator(gradients)
# accumulate loss into metrics
self.update_train_metrics(dict_metrics_losses)
if self.config["gradient_accumulation_steps"] == 1:
return gradients, per_replica_losses
else:
return per_replica_losses
def _one_step_forward_per_replica(self, batch):
if self.config["gradient_accumulation_steps"] == 1:
gradients, per_replica_losses = self._calculate_gradient_per_batch(batch)
self._optimizer.apply_gradients(
zip(gradients, self._trainable_variables), 1.0
)
else:
            # gradient accumulation here.
per_replica_losses = 0.0
for i in tf.range(self.config["gradient_accumulation_steps"]):
reduced_batch = {
k: v[
i
* self.config["batch_size"] : (i + 1)
* self.config["batch_size"]
]
for k, v in batch.items()
}
# run 1 step accumulate
reduced_batch_losses = self._calculate_gradient_per_batch(reduced_batch)
# sum per_replica_losses
per_replica_losses += reduced_batch_losses
gradients = self._gradient_accumulator.gradients
self._optimizer.apply_gradients(
zip(gradients, self._trainable_variables), 1.0
)
self._gradient_accumulator.reset()
return per_replica_losses
@abc.abstractmethod
def compute_per_example_losses(self, batch, outputs):
"""Compute per example losses and return dict_metrics_losses
        Note that all elements of the loss MUST have a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input returned from the dataloader
            outputs: outputs of the model
Returns:
per_example_losses: per example losses for each GPU, shape [B]
dict_metrics_losses: dictionary loss.
"""
per_example_losses = 0.0
dict_metrics_losses = {}
return per_example_losses, dict_metrics_losses
def _eval_epoch(self):
"""Evaluate model one epoch."""
logging.info(f"(Steps: {self.steps}) Start evaluation.")
# calculate loss for each batch
for eval_steps_per_epoch, batch in enumerate(
tqdm(self.eval_data_loader, desc="[eval]"), 1
):
# eval one step
self.one_step_evaluate(batch)
if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]:
                # save intermediate results
self.generate_and_save_intermediate_result(batch)
logging.info(
f"(Steps: {self.steps}) Finished evaluation "
f"({eval_steps_per_epoch} steps per epoch)."
)
# average loss
for key in self.eval_metrics.keys():
logging.info(
f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}."
)
# record
self._write_to_tensorboard(self.eval_metrics, stage="eval")
# reset
self.reset_states_eval()
def _one_step_evaluate_per_replica(self, batch):
outputs = self._model(**batch, training=False)
_, dict_metrics_losses = self.compute_per_example_losses(batch, outputs)
self.update_eval_metrics(dict_metrics_losses)
def _one_step_evaluate(self, batch):
self._strategy.run(self._one_step_evaluate_per_replica, args=(batch,))
def _one_step_predict_per_replica(self, batch):
outputs = self._model(**batch, training=False)
return outputs
def _one_step_predict(self, batch):
outputs = self._strategy.run(self._one_step_predict_per_replica, args=(batch,))
return outputs
@abc.abstractmethod
def generate_and_save_intermediate_result(self, batch):
return
def create_checkpoint_manager(self, saved_path=None, max_to_keep=10):
"""Create checkpoint management."""
if saved_path is None:
saved_path = self.config["outdir"] + "/checkpoints/"
os.makedirs(saved_path, exist_ok=True)
self.saved_path = saved_path
self.ckpt = tf.train.Checkpoint(
steps=tf.Variable(1), epochs=tf.Variable(1), optimizer=self.get_optimizer()
)
self.ckp_manager = tf.train.CheckpointManager(
self.ckpt, saved_path, max_to_keep=max_to_keep
)
def save_checkpoint(self):
"""Save checkpoint."""
self.ckpt.steps.assign(self.steps)
self.ckpt.epochs.assign(self.epochs)
self.ckp_manager.save(checkpoint_number=self.steps)
utils.save_weights(
self._model,
self.saved_path + "model-{}.h5".format(self.steps)
)
def load_checkpoint(self, pretrained_path):
"""Load checkpoint."""
self.ckpt.restore(pretrained_path)
self.steps = self.ckpt.steps.numpy()
self.epochs = self.ckpt.epochs.numpy()
self._optimizer = self.ckpt.optimizer
# re-assign iterations (global steps) for optimizer.
self._optimizer.iterations.assign(tf.cast(self.steps, tf.int64))
# load weights.
utils.load_weights(
self._model,
self.saved_path + "model-{}.h5".format(self.steps)
)
def _check_train_finish(self):
"""Check training finished."""
if self.steps >= self.config["train_max_steps"]:
self.finish_train = True
def _check_log_interval(self):
"""Log to tensorboard."""
if self.steps % self.config["log_interval_steps"] == 0:
for metric_name in self.list_metrics_name:
logging.info(
f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}."
)
self._write_to_tensorboard(self.train_metrics, stage="train")
# reset
self.reset_states_train()
def fit(self, train_data_loader, valid_data_loader, saved_path, resume=None):
self.set_train_data_loader(train_data_loader)
self.set_eval_data_loader(valid_data_loader)
self.train_data_loader = self._strategy.experimental_distribute_dataset(
self.train_data_loader
)
self.eval_data_loader = self._strategy.experimental_distribute_dataset(
self.eval_data_loader
)
with self._strategy.scope():
self.create_checkpoint_manager(saved_path=saved_path, max_to_keep=10000)
            if resume is not None and len(resume) > 1:
self.load_checkpoint(resume)
logging.info(f"Successfully resumed from {resume}.")
self.run()
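# --- Added usage sketch; illustrative, not part of the original file. ---
# Minimal shape of a concrete trainer; the "mel_gts" batch key and treating
# `outputs` as a single tensor are assumptions standing in for a real model.
class _ExampleTrainer(Seq2SeqBasedTrainer):
    def compute_per_example_losses(self, batch, outputs):
        # per-example MSE; shape [B] as the base class requires
        per_example_losses = tf.reduce_mean(
            tf.square(batch["mel_gts"] - outputs), axis=[1, 2]
        )
        return per_example_losses, {"mel_loss": per_example_losses}

    def generate_and_save_intermediate_result(self, batch):
        pass  # e.g. plot and save predicted mel-spectrograms here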
# File: TensorFlowTTS-master/tensorflow_tts/trainers/__init__.py
from tensorflow_tts.trainers.base_trainer import GanBasedTrainer, Seq2SeqBasedTrainer
# File: TensorFlowTTS-master/test/test_hifigan.py
# -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import tensorflow as tf
from tensorflow_tts.configs import (
HifiGANDiscriminatorConfig,
HifiGANGeneratorConfig,
MelGANDiscriminatorConfig,
)
from tensorflow_tts.models import (
TFHifiGANGenerator,
TFHifiGANMultiPeriodDiscriminator,
TFMelGANMultiScaleDiscriminator,
)
from examples.hifigan.train_hifigan import TFHifiGANDiscriminator
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_hifigan_generator_args(**kwargs):
defaults = dict(
out_channels=1,
kernel_size=7,
filters=128,
use_bias=True,
upsample_scales=[8, 8, 2, 2],
stacks=3,
stack_kernel_size=[3, 7, 11],
stack_dilation_rate=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
use_final_nolinear_activation=True,
is_weight_norm=True,
initializer_seed=42,
)
defaults.update(kwargs)
return defaults
def make_hifigan_discriminator_args(**kwargs):
defaults_multisperiod = dict(
out_channels=1,
period_scales=[2, 3, 5, 7, 11],
n_layers=5,
kernel_size=5,
strides=3,
filters=8,
filter_scales=4,
max_filters=1024,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
is_weight_norm=True,
initializer_seed=42,
)
defaults_multisperiod.update(kwargs)
defaults_multiscale = dict(
out_channels=1,
scales=3,
downsample_pooling="AveragePooling1D",
downsample_pooling_params={"pool_size": 4, "strides": 2,},
kernel_sizes=[5, 3],
filters=16,
max_downsample_filters=1024,
use_bias=True,
downsample_scales=[4, 4, 4, 4],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
)
defaults_multiscale.update(kwargs)
return [defaults_multisperiod, defaults_multiscale]
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({"kernel_size": 3}, {}, {}),
({"filters": 1024}, {}, {}),
({"stack_kernel_size": [1, 2, 3]}, {}, {}),
({"stack_kernel_size": [3, 5, 7], "stacks": 3}, {}, {}),
({"upsample_scales": [4, 4, 4, 4]}, {}, {}),
({"upsample_scales": [8, 8, 2, 2]}, {}, {}),
({"filters": 1024, "upsample_scales": [8, 8, 2, 2]}, {}, {}),
],
)
def test_hifigan_trainable(dict_g, dict_d, dict_loss):
batch_size = 4
batch_length = 4096
args_g = make_hifigan_generator_args(**dict_g)
args_d_p, args_d_s = make_hifigan_discriminator_args(**dict_d)
args_g = HifiGANGeneratorConfig(**args_g)
args_d_p = HifiGANDiscriminatorConfig(**args_d_p)
args_d_s = MelGANDiscriminatorConfig(**args_d_s)
generator = TFHifiGANGenerator(args_g)
discriminator_p = TFHifiGANMultiPeriodDiscriminator(args_d_p)
discriminator_s = TFMelGANMultiScaleDiscriminator(args_d_s)
discriminator = TFHifiGANDiscriminator(discriminator_p, discriminator_s)
# File: TensorFlowTTS-master/test/test_fastspeech.py
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import tensorflow as tf
from tensorflow_tts.configs import FastSpeechConfig
from tensorflow_tts.models import TFFastSpeech
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("new_size", [100, 200, 300])
def test_fastspeech_resize_positional_embeddings(new_size):
config = FastSpeechConfig()
fastspeech = TFFastSpeech(config, name="fastspeech")
fastspeech._build()
fastspeech.save_weights("./test.h5")
fastspeech.resize_positional_embeddings(new_size)
fastspeech.load_weights("./test.h5", by_name=True, skip_mismatch=True)
@pytest.mark.parametrize("num_hidden_layers,n_speakers", [(2, 1), (3, 2), (4, 3)])
def test_fastspeech_trainable(num_hidden_layers, n_speakers):
config = FastSpeechConfig(
encoder_num_hidden_layers=num_hidden_layers,
decoder_num_hidden_layers=num_hidden_layers + 1,
n_speakers=n_speakers,
)
fastspeech = TFFastSpeech(config, name="fastspeech")
optimizer = tf.keras.optimizers.Adam(lr=0.001)
# fake inputs
input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
speaker_ids = tf.convert_to_tensor([0], tf.int32)
duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
mel_gts = tf.random.uniform(shape=[1, 10, 80], dtype=tf.float32)
@tf.function
def one_step_training():
with tf.GradientTape() as tape:
mel_outputs_before, _, duration_outputs = fastspeech(
input_ids, speaker_ids, duration_gts, training=True
)
duration_loss = tf.keras.losses.MeanSquaredError()(
duration_gts, duration_outputs
)
mel_loss = tf.keras.losses.MeanSquaredError()(mel_gts, mel_outputs_before)
loss = duration_loss + mel_loss
gradients = tape.gradient(loss, fastspeech.trainable_variables)
optimizer.apply_gradients(zip(gradients, fastspeech.trainable_variables))
tf.print(loss)
import time
for i in range(2):
if i == 1:
start = time.time()
one_step_training()
print(time.time() - start)
# File: TensorFlowTTS-master/test/test_tacotron2.py
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import yaml
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_tts.configs import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
from tensorflow_tts.utils import return_strategy
from examples.tacotron2.train_tacotron2 import Tacotron2Trainer
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
logging.basicConfig(
level=logging.WARNING,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize(
"var_train_expr, config_path",
[
("embeddings|decoder_cell", "./examples/tacotron2/conf/tacotron2.v1.yaml"),
(None, "./examples/tacotron2/conf/tacotron2.v1.yaml"),
(
"embeddings|decoder_cell",
"./examples/tacotron2/conf/tacotron2.baker.v1.yaml",
),
("embeddings|decoder_cell", "./examples/tacotron2/conf/tacotron2.kss.v1.yaml"),
],
)
def test_tacotron2_train_some_layers(var_train_expr, config_path):
config = Tacotron2Config(n_speakers=5, reduction_factor=1)
model = TFTacotron2(config, name="tacotron2")
model._build()
optimizer = tf.keras.optimizers.Adam(lr=0.001)
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update({"outdir": "./"})
config.update({"var_train_expr": var_train_expr})
STRATEGY = return_strategy()
trainer = Tacotron2Trainer(
config=config, strategy=STRATEGY, steps=0, epochs=0, is_mixed_precision=False,
)
trainer.compile(model, optimizer)
len_trainable_vars = len(trainer._trainable_variables)
all_trainable_vars = len(model.trainable_variables)
if var_train_expr is None:
tf.debugging.assert_equal(len_trainable_vars, all_trainable_vars)
else:
tf.debugging.assert_less(len_trainable_vars, all_trainable_vars)
@pytest.mark.parametrize(
"n_speakers, n_chars, max_input_length, max_mel_length, batch_size",
[(2, 15, 25, 50, 2),],
)
def test_tacotron2_trainable(
n_speakers, n_chars, max_input_length, max_mel_length, batch_size
):
config = Tacotron2Config(n_speakers=n_speakers, reduction_factor=1)
model = TFTacotron2(config, name="tacotron2")
model._build()
# fake input
input_ids = tf.random.uniform(
[batch_size, max_input_length], maxval=n_chars, dtype=tf.int32
)
speaker_ids = tf.convert_to_tensor([0] * batch_size, tf.int32)
mel_gts = tf.random.uniform(shape=[batch_size, max_mel_length, 80])
mel_lengths = np.random.randint(
max_mel_length, high=max_mel_length + 1, size=[batch_size]
)
mel_lengths[-1] = max_mel_length
mel_lengths = tf.convert_to_tensor(mel_lengths, dtype=tf.int32)
stop_tokens = np.zeros((batch_size, max_mel_length), np.float32)
stop_tokens = tf.convert_to_tensor(stop_tokens)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
binary_crossentropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function(experimental_relax_shapes=True)
def one_step_training(input_ids, speaker_ids, mel_gts, mel_lengths):
with tf.GradientTape() as tape:
mel_preds, post_mel_preds, stop_preds, alignment_history = model(
input_ids,
tf.constant([max_input_length, max_input_length]),
speaker_ids,
mel_gts,
mel_lengths,
training=True,
)
loss_before = tf.keras.losses.MeanSquaredError()(mel_gts, mel_preds)
loss_after = tf.keras.losses.MeanSquaredError()(mel_gts, post_mel_preds)
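            # stop-token targets: 0 until the final frame of each utterance, 1 from that frame on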
stop_gts = tf.expand_dims(
tf.range(tf.reduce_max(mel_lengths), dtype=tf.int32), 0
) # [1, max_len]
stop_gts = tf.tile(stop_gts, [tf.shape(mel_lengths)[0], 1]) # [B, max_len]
stop_gts = tf.cast(
tf.math.greater_equal(stop_gts, tf.expand_dims(mel_lengths, 1) - 1),
tf.float32,
)
# calculate stop_token loss
stop_token_loss = binary_crossentropy(stop_gts, stop_preds)
loss = stop_token_loss + loss_before + loss_after
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss, alignment_history
for i in range(2):
if i == 1:
start = time.time()
loss, alignment_history = one_step_training(
input_ids, speaker_ids, mel_gts, mel_lengths
)
print(f" > loss: {loss}")
total_runtime = time.time() - start
print(f" > Total run-time: {total_runtime}")
print(f" > Avg run-time: {total_runtime/10}")
| 5,329 | 34.533333 | 87 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_melgan_layers.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_tts.models.melgan import (
TFConvTranspose1d,
TFReflectionPad1d,
TFResidualStack,
)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("padding_size", [(3), (5)])
def test_padding(padding_size):
fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
out = TFReflectionPad1d(padding_size=padding_size)(fake_input_1d)
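    # reflection padding adds padding_size frames to each end of the time axis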
assert np.array_equal(
tf.keras.backend.int_shape(out), [4, 8000 + 2 * padding_size, 256]
)
@pytest.mark.parametrize(
"filters,kernel_size,strides,padding,is_weight_norm",
[(512, 40, 8, "same", False), (768, 15, 8, "same", True)],
)
def test_convtranpose1d(filters, kernel_size, strides, padding, is_weight_norm):
fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
conv1d_transpose = TFConvTranspose1d(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
is_weight_norm=is_weight_norm,
initializer_seed=42,
)
out = conv1d_transpose(fake_input_1d)
assert np.array_equal(tf.keras.backend.int_shape(out), [4, 8000 * strides, filters])
@pytest.mark.parametrize(
"kernel_size,filters,dilation_rate,use_bias,nonlinear_activation,nonlinear_activation_params,is_weight_norm",
[
(3, 256, 1, True, "LeakyReLU", {"alpha": 0.3}, True),
(3, 256, 3, True, "ReLU", {}, False),
],
)
def test_residualblock(
kernel_size,
filters,
dilation_rate,
use_bias,
nonlinear_activation,
nonlinear_activation_params,
is_weight_norm,
):
fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
residual_block = TFResidualStack(
kernel_size=kernel_size,
filters=filters,
dilation_rate=dilation_rate,
use_bias=use_bias,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
is_weight_norm=is_weight_norm,
initializer_seed=42,
)
out = residual_block(fake_input_1d)
assert np.array_equal(tf.keras.backend.int_shape(out), [4, 8000, filters])
| 2,965 | 30.892473 | 113 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import tensorflow as tf
from tensorflow_tts.configs import MelGANDiscriminatorConfig, MelGANGeneratorConfig
from tensorflow_tts.models import TFMelGANGenerator, TFMelGANMultiScaleDiscriminator
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_melgan_generator_args(**kwargs):
defaults = dict(
out_channels=1,
kernel_size=7,
filters=512,
use_bias=True,
upsample_scales=[8, 8, 2, 2],
stack_kernel_size=3,
stacks=3,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
)
defaults.update(kwargs)
return defaults
def make_melgan_discriminator_args(**kwargs):
defaults = dict(
out_channels=1,
scales=3,
downsample_pooling="AveragePooling1D",
downsample_pooling_params={"pool_size": 4, "strides": 2,},
kernel_sizes=[5, 3],
filters=16,
max_downsample_filters=1024,
use_bias=True,
downsample_scales=[4, 4, 4, 4],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
)
defaults.update(kwargs)
return defaults
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({"kernel_size": 3}, {}, {}),
({"filters": 1024}, {}, {}),
({"stack_kernel_size": 5}, {}, {}),
({"stack_kernel_size": 5, "stacks": 2}, {}, {}),
({"upsample_scales": [4, 4, 4, 4]}, {}, {}),
({"upsample_scales": [8, 8, 2, 2]}, {}, {}),
({"filters": 1024, "upsample_scales": [8, 8, 2, 2]}, {}, {}),
],
)
def test_melgan_trainable(dict_g, dict_d, dict_loss):
batch_size = 4
batch_length = 4096
args_g = make_melgan_generator_args(**dict_g)
args_d = make_melgan_discriminator_args(**dict_d)
args_g = MelGANGeneratorConfig(**args_g)
args_d = MelGANDiscriminatorConfig(**args_d)
generator = TFMelGANGenerator(args_g)
discriminator = TFMelGANMultiScaleDiscriminator(args_d)
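    # note: dict_loss is accepted by the parametrization but unused here; this test only
    # exercises construction of the generator and multi-scale discriminator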
| 2,841 | 29.55914 | 84 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_mb_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import logging
import os
import numpy as np
import pytest
from tensorflow_tts.configs import MultiBandMelGANGeneratorConfig
from tensorflow_tts.models import TFPQMF, TFMelGANGenerator
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_multi_band_melgan_generator_args(**kwargs):
defaults = dict(
out_channels=1,
kernel_size=7,
filters=512,
use_bias=True,
upsample_scales=[8, 8, 2, 2],
stack_kernel_size=3,
stacks=3,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
subbands=4,
        taps=62,
cutoff_ratio=0.15,
beta=9.0,
)
defaults.update(kwargs)
return defaults
@pytest.mark.parametrize(
"dict_g",
[
{"subbands": 4, "upsample_scales": [2, 4, 8], "stacks": 4, "out_channels": 4},
{"subbands": 4, "upsample_scales": [4, 4, 4], "stacks": 5, "out_channels": 4},
],
)
def test_multi_band_melgan(dict_g):
args_g = make_multi_band_melgan_generator_args(**dict_g)
args_g = MultiBandMelGANGeneratorConfig(**args_g)
generator = TFMelGANGenerator(args_g, name="multi_band_melgan")
generator._build()
pqmf = TFPQMF(args_g, name="pqmf")
fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
fake_y = tf.random.uniform(shape=[1, 100 * 256, 1], dtype=tf.float32)
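    # PQMF analysis splits audio into `subbands` channels (shortening the time axis by the
    # same factor); synthesis recombines them, so shapes must round-trip exactly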
y_hat_subbands = generator(fake_mels)
y_hat = pqmf.synthesis(y_hat_subbands)
y_subbands = pqmf.analysis(fake_y)
assert np.shape(y_subbands) == np.shape(y_hat_subbands)
assert np.shape(fake_y) == np.shape(y_hat)
| 2,401 | 29.025 | 86 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import tensorflow as tf
from tensorflow_tts.configs import (
ParallelWaveGANGeneratorConfig,
ParallelWaveGANDiscriminatorConfig,
)
from tensorflow_tts.models import (
TFParallelWaveGANGenerator,
TFParallelWaveGANDiscriminator,
)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_pwgan_generator_args(**kwargs):
defaults = dict(
out_channels=1,
kernel_size=3,
n_layers=30,
stacks=3,
residual_channels=64,
gate_channels=128,
skip_channels=64,
aux_channels=80,
aux_context_window=2,
dropout_rate=0.0,
use_bias=True,
use_causal_conv=False,
upsample_conditional_features=True,
upsample_params={"upsample_scales": [4, 4, 4, 4]},
initializer_seed=42,
)
defaults.update(kwargs)
return defaults
def make_pwgan_discriminator_args(**kwargs):
defaults = dict(
out_channels=1,
kernel_size=3,
n_layers=10,
conv_channels=64,
use_bias=True,
dilation_factor=1,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
initializer_seed=42,
apply_sigmoid_at_last=False,
)
defaults.update(kwargs)
return defaults
@pytest.mark.parametrize(
"dict_g, dict_d",
[
({}, {}),
(
{"kernel_size": 3, "aux_context_window": 5, "residual_channels": 128},
{"dilation_factor": 2},
),
({"stacks": 4, "n_layers": 40}, {"conv_channels": 128}),
],
)
def test_parallel_wavegan_trainable(dict_g, dict_d):
random_c = tf.random.uniform(shape=[4, 32, 80], dtype=tf.float32)
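    # fake local conditioning: 32 mel frames; with the default upsample_scales [4, 4, 4, 4]
    # the generator upsamples this 4*4*4*4 = 256x to the audio rate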
args_g = make_pwgan_generator_args(**dict_g)
args_d = make_pwgan_discriminator_args(**dict_d)
args_g = ParallelWaveGANGeneratorConfig(**args_g)
args_d = ParallelWaveGANDiscriminatorConfig(**args_d)
generator = TFParallelWaveGANGenerator(args_g)
generator._build()
discriminator = TFParallelWaveGANDiscriminator(args_d)
discriminator._build()
generated_audios = generator(random_c, training=True)
discriminator(generated_audios)
generator.summary()
discriminator.summary()
| 2,967 | 26.481481 | 82 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import yaml
import pytest
import tensorflow as tf
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
from tensorflow_tts.utils import return_strategy
from examples.fastspeech2.train_fastspeech2 import FastSpeech2Trainer
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("new_size", [100, 200, 300])
def test_fastspeech_resize_positional_embeddings(new_size):
config = FastSpeech2Config()
fastspeech2 = TFFastSpeech2(config, name="fastspeech")
fastspeech2._build()
fastspeech2.save_weights("./test.h5")
fastspeech2.resize_positional_embeddings(new_size)
fastspeech2.load_weights("./test.h5", by_name=True, skip_mismatch=True)
@pytest.mark.parametrize(
"var_train_expr, config_path",
[
(None, "./examples/fastspeech2/conf/fastspeech2.v1.yaml"),
("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.v1.yaml"),
("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.v2.yaml"),
("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.baker.v2.yaml"),
("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.kss.v1.yaml"),
("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.kss.v2.yaml"),
],
)
def test_fastspeech2_train_some_layers(var_train_expr, config_path):
config = FastSpeech2Config(n_speakers=5)
model = TFFastSpeech2(config)
model._build()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update({"outdir": "./"})
config.update({"var_train_expr": var_train_expr})
STRATEGY = return_strategy()
trainer = FastSpeech2Trainer(
config=config, strategy=STRATEGY, steps=0, epochs=0, is_mixed_precision=False,
)
trainer.compile(model, optimizer)
len_trainable_vars = len(trainer._trainable_variables)
all_trainable_vars = len(model.trainable_variables)
if var_train_expr is None:
tf.debugging.assert_equal(len_trainable_vars, all_trainable_vars)
else:
tf.debugging.assert_less(len_trainable_vars, all_trainable_vars)
@pytest.mark.parametrize("num_hidden_layers,n_speakers", [(2, 1), (3, 2), (4, 3)])
def test_fastspeech_trainable(num_hidden_layers, n_speakers):
config = FastSpeech2Config(
encoder_num_hidden_layers=num_hidden_layers,
decoder_num_hidden_layers=num_hidden_layers + 1,
n_speakers=n_speakers,
)
fastspeech2 = TFFastSpeech2(config, name="fastspeech")
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# fake inputs
input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
speaker_ids = tf.convert_to_tensor([0], tf.int32)
duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
f0_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.float32)
energy_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.float32)
mel_gts = tf.random.uniform(shape=[1, 10, 80], dtype=tf.float32)
@tf.function
def one_step_training():
with tf.GradientTape() as tape:
mel_outputs_before, _, duration_outputs, _, _ = fastspeech2(
input_ids, speaker_ids, duration_gts, f0_gts, energy_gts, training=True,
)
duration_loss = tf.keras.losses.MeanSquaredError()(
duration_gts, duration_outputs
)
mel_loss = tf.keras.losses.MeanSquaredError()(mel_gts, mel_outputs_before)
loss = duration_loss + mel_loss
gradients = tape.gradient(loss, fastspeech2.trainable_variables)
optimizer.apply_gradients(zip(gradients, fastspeech2.trainable_variables))
tf.print(loss)
import time
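    # the first call includes tf.function tracing overhead, so only the second call is timed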
for i in range(2):
if i == 1:
start = time.time()
one_step_training()
print(time.time() - start)
| 4,805 | 35.969231 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_auto.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import tensorflow as tf
from tensorflow_tts.inference import AutoConfig
from tensorflow_tts.inference import AutoProcessor
from tensorflow_tts.inference import TFAutoModel
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize(
"mapper_path",
[
"./test/files/baker_mapper.json",
"./test/files/kss_mapper.json",
"./test/files/libritts_mapper.json",
"./test/files/ljspeech_mapper.json",
]
)
def test_auto_processor(mapper_path):
processor = AutoProcessor.from_pretrained(pretrained_path=mapper_path)
processor.save_pretrained("./test_saved")
processor = AutoProcessor.from_pretrained("./test_saved/processor.json")
@pytest.mark.parametrize(
"config_path",
[
"./examples/fastspeech/conf/fastspeech.v1.yaml",
"./examples/fastspeech/conf/fastspeech.v3.yaml",
"./examples/fastspeech2/conf/fastspeech2.v1.yaml",
"./examples/fastspeech2/conf/fastspeech2.v2.yaml",
"./examples/fastspeech2/conf/fastspeech2.kss.v1.yaml",
"./examples/fastspeech2/conf/fastspeech2.kss.v2.yaml",
"./examples/melgan/conf/melgan.v1.yaml",
"./examples/melgan_stft/conf/melgan_stft.v1.yaml",
"./examples/multiband_melgan/conf/multiband_melgan.v1.yaml",
"./examples/tacotron2/conf/tacotron2.v1.yaml",
"./examples/tacotron2/conf/tacotron2.kss.v1.yaml",
"./examples/parallel_wavegan/conf/parallel_wavegan.v1.yaml",
"./examples/hifigan/conf/hifigan.v1.yaml",
"./examples/hifigan/conf/hifigan.v2.yaml",
]
)
def test_auto_model(config_path):
config = AutoConfig.from_pretrained(pretrained_path=config_path)
model = TFAutoModel.from_pretrained(pretrained_path=None, config=config)
# test save_pretrained
config.save_pretrained("./test_saved")
model.save_pretrained("./test_saved")
# test from_pretrained
config = AutoConfig.from_pretrained("./test_saved/config.yml")
model = TFAutoModel.from_pretrained("./test_saved/model.h5", config=config)
| 2,820 | 34.708861 | 79 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_base_processor.py | import pytest
from tensorflow_tts.processor.base_processor import BaseProcessor, DataProcessorError
import string
from dataclasses import dataclass
from shutil import copyfile
@dataclass
class LJ(BaseProcessor):
def get_one_sample(self, item):
sample = {
"raw_text": None,
"text_ids": None,
"audio": None,
"utt_id": None,
"speaker_name": None,
"rate": None,
}
return sample
def text_to_sequence(self, text):
return ["0"]
def setup_eos_token(self):
return None
def save_pretrained(self, saved_path):
return super().save_pretrained(saved_path)
@pytest.fixture
def processor(tmpdir):
copyfile("test/files/train.txt", f"{tmpdir}/train.txt")
processor = LJ(data_dir=tmpdir, symbols=list(string.ascii_lowercase))
return processor
@pytest.fixture
def mapper_processor(tmpdir):
copyfile("test/files/train.txt", f"{tmpdir}/train.txt")
copyfile("test/files/mapper.json", f"{tmpdir}/mapper.json")
processor = LJ(data_dir=tmpdir, loaded_mapper_path=f"{tmpdir}/mapper.json")
return processor
def test_items_creation(processor):
# Check text
assert processor.items[0][0] == "in fact its just a test."
assert processor.items[1][0] == "in fact its just a speaker number one."
# Check path
assert processor.items[0][1].split("/")[-1] == "libri1.wav"
assert processor.items[1][1].split("/")[-1] == "libri2.wav"
# Check speaker name
assert processor.items[0][2] == "One"
assert processor.items[1][2] == "Two"
def test_mapper(processor):
# check symbol to id mapper
assert processor.symbol_to_id["a"] == 0
# check id to symbol mapper
assert processor.id_to_symbol[0] == "a"
# check speaker mapper
assert processor.speakers_map["One"] == 0
assert processor.speakers_map["Two"] == 1
def test_adding_symbols(processor):
# check symbol to id mapper
assert processor.symbol_to_id["a"] == 0
# check id to symbol mapper
assert processor.id_to_symbol[0] == "a"
old_processor_len = len(processor.symbols)
# Test adding new symbol
processor.add_symbol("O_O")
assert processor.symbol_to_id["a"] == 0
assert (
processor.symbol_to_id["O_O"] == len(processor.symbols) - 1
) # new symbol should have last id
assert processor.id_to_symbol[0] == "a"
assert processor.id_to_symbol[len(processor.symbols) - 1] == "O_O"
assert old_processor_len == len(processor.symbols) - 1
def test_loading_mapper(mapper_processor):
assert mapper_processor.symbol_to_id["a"] == 0
assert mapper_processor.symbol_to_id["@ph"] == 2
assert mapper_processor.speakers_map["test_one"] == 0
assert mapper_processor.speakers_map["test_two"] == 1
assert mapper_processor.id_to_symbol[0] == "a"
assert mapper_processor.id_to_symbol[2] == "@ph"
# Test failed creation
with pytest.raises(DataProcessorError):
failed = LJ(data_dir="test/files")
| 3,035 | 27.111111 | 85 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/setup.py | from setuptools import setup
config = {
'name': 'sgmcmc_ssm',
'version': '0.1',
'url': 'https://github.com/aicherc/sgmcmc_ssm_code',
'description': 'SGMCMC for SSM Code',
'author': 'Christopher Aicher',
'license': 'MIT License',
'packages': ['sgmcmc_ssm'],
}
setup(**config)
# Build Extensions: python setup.py build_ext --inplace
# Develop: python setup.py develop
# Remove: python setup.py develop --uninstall
| 441 | 26.625 | 56 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/lgssm/driver.py | """ Experiment Driver
Call python <path_to_this_file>.py --help to see documentation
"""
import os
import sys
sys.path.append(os.getcwd()) # Fix Python Path
import numpy as np
import pandas as pd
import joblib
import time
import argparse
from tqdm import tqdm
import functools
import matplotlib
matplotlib.use('Agg') # For Cluster
import matplotlib.pyplot as plt
import seaborn as sns
import logging # For Logs
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
logger = logging.getLogger(name=__name__)
from sgmcmc_ssm.evaluator import (
SamplerEvaluator, OfflineEvaluator, half_average_parameters_list,
)
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
metric_function_parameters,
metric_compare_x,
noisy_logjoint_loglike_metric,
)
from sgmcmc_ssm.driver_utils import (
script_builder, make_path, TqdmToLogger,
pandas_write_df_to_csv, joblib_write_to_file,
)
from sgmcmc_ssm.plotting_utils import (
plot_metrics, plot_trace_plot,
)
from sgmcmc_ssm.models.lgssm import (
LGSSMSampler,
LGSSMPrior,
LGSSMPreconditioner,
generate_lgssm_data,
)
DEFAULT_OPTIONS = dict(
model_type = "LGSSM",
prior_variance = 100.0,
max_num_iters = 1000000,
max_time = 60,
eval_freq = 1,
max_eval_iterations = 1000,
max_eval_time = 60,
steps_per_iteration = 1,
checkpoint_num_iters = 1000,
checkpoint_time = 60*30,
)
## Script Argument Parser
def construct_parser():
""" Define script argument parser """
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
)
# Key Value Args
parser.add_argument("--experiment_folder",
help="path to experiment",
type=str,
)
parser.add_argument("--experiment_id",
default=0,
help="id of experiment (optional)",
type=int,
)
parser.add_argument("--path_to_additional_args", default="",
help="additional arguments to pass to setup",
type=str,
)
# Action Args
parser.add_argument("--setup", action='store_const', const=True,
help="flag for whether to setup data, inits, and fit/eval args",
)
parser.add_argument("--fit", action='store_const', const=True,
help="flag for whether to run sampler/optimization",
)
parser.add_argument("--eval", default="",
help="run evaluation of parameters on target data (e.g. 'train', 'test', 'half_avg_train')",
type=str,
)
parser.add_argument("--trace_eval", default="",
help="run evaluation on parameter trace (e.g. 'ksd', 'kstest')",
type=str,
)
parser.add_argument("--process_out", action='store_const', const=True,
help="flag for whether to aggregate output",
)
parser.add_argument("--make_plots", action='store_const', const=True,
help="flag for whether to plot aggregated output",
)
parser.add_argument("--make_scripts", action='store_const', const=True,
help="flag for setup to only recreate scripts",
)
return parser
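# Example invocations (hypothetical paths, for illustration only):
#   python driver.py --experiment_folder experiments/lgssm --experiment_id 3 --fit
#   python driver.py --experiment_folder experiments/lgssm --experiment_id 3 --eval half_avg_test
#   python driver.py --experiment_folder experiments/lgssm --process_out --make_plots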
## Main Dispatcher
def main(experiment_folder, experiment_id, path_to_additional_args,
setup, fit, eval, trace_eval, process_out, make_plots,
make_scripts, **kwargs):
""" Main Dispatcher see construct_parser for argument help """
if kwargs:
logger.warning("Unused kwargs: {0}".format(kwargs))
out = {}
if setup:
out['setup'] = do_setup(experiment_folder, path_to_additional_args)
make_scripts = True
logging.info("Extracting Options for experiment id {0}".format(
experiment_id))
path_to_arg_list = os.path.join(experiment_folder, "in", "options.p")
arg_list = joblib.load(path_to_arg_list)
experiment_options = arg_list[experiment_id]
logger.info("Experiment Options: {0}".format(experiment_options))
if make_scripts:
out['make_scripts'] = do_make_scripts(
experiment_folder, path_to_additional_args, arg_list)
if fit:
out['fit'] = do_fit(**experiment_options)
if eval != "":
for eval_ in eval.split(","):
if eval_ in ['train', 'half_avg_train', 'test', 'half_avg_test']:
out['eval_{0}'.format(eval_)] = do_eval(
target=eval_,
**experiment_options,
)
else:
raise ValueError("Unrecognized 'eval' target {0}".format(eval_))
if trace_eval != "":
for trace_eval_ in trace_eval.split(","):
if trace_eval_ == "ksd":
                out['trace_eval_{0}'.format(trace_eval_)] = do_eval_ksd(
**experiment_options,
)
elif trace_eval_ == "ess":
raise NotImplementedError()
elif trace_eval_ == "kstest":
                out['trace_eval_{0}'.format(trace_eval_)] = do_eval_ks_test(
**experiment_options,
)
else:
raise ValueError(
"Unrecognized 'trace_eval' target {0}".format(trace_eval_))
if process_out:
out['process_out'] = do_process_out(experiment_folder)
if make_plots:
out['make_plots'] = do_make_plots(experiment_folder)
if len(out.keys()) == 0:
raise ValueError("No Flags Set")
return out
## Setup Function
def do_setup(experiment_folder, path_to_additional_args):
""" Setup Shell Scripts for Experiment """
additional_args = joblib.load(path_to_additional_args)
# Setup Data
logger.info("Setting Up Data")
data_args = setup_train_test_data(experiment_folder, **additional_args)
# Setup
logger.info("Saving Experiment Options per ID")
sampler_args = additional_args['sampler_args']
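    # dict_product and setup_options (defined elsewhere in this script) are assumed to take the
    # cross product of sampler and data args, yielding one options row per experiment_id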
arg_list = dict_product(sampler_args, data_args)
options_df = setup_options(experiment_folder, arg_list)
return options_df
## Make Scripts
def do_make_scripts(experiment_folder, path_to_additional_args, arg_list):
additional_args = joblib.load(path_to_additional_args)
options_df = pd.DataFrame(arg_list)
# Setup Shell Scripts
logger.info("Setting up Shell Scripts")
shell_args_base = [{
'--experiment_folder': experiment_folder,
'--experiment_id': experiment_id,
} for experiment_id in options_df['experiment_id']
]
# Fit Script
script_builder(
script_name = "fit",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--fit": None}) for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('fit_script_kwargs', {})
)
# Eval Scripts
script_builder(
script_name = "eval_train",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--eval": 'half_avg_train'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
script_builder(
script_name = "eval_test",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--eval": 'half_avg_test'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
script_builder(
script_name = "trace_eval",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--trace_eval": 'kstest,ksd'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
# Process Script
script_builder(
script_name = "process_out",
python_script_path = additional_args['python_script_path'],
python_script_args = [{
"--experiment_folder": experiment_folder,
"--process_out": None,
}],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('process_out_script_kwargs', {})
)
# Plot Script
script_builder(
script_name = "make_plots",
python_script_path = additional_args['python_script_path'],
python_script_args = [{
"--experiment_folder": experiment_folder,
"--make_plots": None,
}],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('make_plots_script_kwargs', {})
)
# Run All Script
path_to_runall_script = os.path.join(
additional_args['path_to_shell_script'], 'run_all.sh')
with open(path_to_runall_script, 'w') as f:
f.write("#!/bin/bash\n")
f.write("cd {0}\n".format(additional_args['project_root']))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'fit.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'eval_train.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'eval_test.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'process_out.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'make_plots.sh')))
os.chmod(path_to_runall_script, 0o775)
logger.info("Run All Script at {0}".format(path_to_runall_script))
# Clear All Script
path_to_clear_script = os.path.join(
additional_args['path_to_shell_script'], 'clear_all.sh')
with open(path_to_clear_script, 'w') as f:
f.write("#!/bin/bash\n")
f.write("cd {0}\n".format(
os.path.join(additional_args['project_root'], experiment_folder)))
f.write("rm -r ./in ./out ./scratch ./fig\n".format(os.path.basename(
path_to_additional_args)))
f.write("cd {0}\n".format(
os.path.join(additional_args['project_root'],
additional_args['path_to_shell_script'])
))
f.write("rm -r ./fit ./eval_train ./eval_test ./process_out ./make_plots ./trace_eval\n")
os.chmod(path_to_clear_script, 0o775)
logger.info("Clear Script at {0}".format(path_to_clear_script))
return options_df
## Fit Module
def do_fit(
experiment_name, experiment_id,
experiment_folder, path_to_data, path_to_init,
model_type, prior_variance,
inference_method, eval_freq,
max_num_iters, steps_per_iteration, max_time,
checkpoint_num_iters, checkpoint_time,
**kwargs):
""" Fit function
Saves list of parameters + runtimes to <experiment_folder>/out/fit/
Args:
experiment_name, experiment_id - experiment id parameters
experiment_folder, path_to_data, path_to_init - paths to input + output
model_type, prior_variance - args for get_model_sampler_prior()
inference_method - get_model_sampler_step()
eval_freq - how frequently to eval metric funcs
max_num_iters, steps_per_iteration, max_time - how long to fit/train
checkpoint_num_iters, checkpoint_time - how frequent to checkpoint
**kwargs - contains inference_method kwargs
"""
logger.info("Beginning Experiment {0} for id:{1}".format(
experiment_name, experiment_id))
Sampler, Prior = get_model_sampler_prior(model_type)
# Make Paths
path_to_out = os.path.join(experiment_folder, "out", "fit")
path_to_fig = os.path.join(experiment_folder, "fig", "fit",
"{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_fit_state = os.path.join(path_to_scratch,
"fit_{0:0>4}_state.p".format(experiment_id))
make_path(path_to_out)
make_path(path_to_fig)
make_path(path_to_scratch)
# Load Train Data
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
observations = data['observations']
# Set Metric + Sample Functions for Evaluator
parameter_names = ['phi', 'sigma', 'tau']
sample_functions = [sample_function_parameters(parameter_names)]
metric_functions = []
if 'parameters' in data.keys():
metric_functions += [
metric_function_parameters(
parameter_names = parameter_names,
target_values = [getattr(data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names = ['logmse' for _ in parameter_names],
)
]
# Check if existing sampler and evaluator state exists
if os.path.isfile(path_to_fit_state):
logger.info("Continuing Evaluation from {0}".format(path_to_fit_state))
fit_state = joblib.load(path_to_fit_state)
init_parameters = fit_state['parameters']
parameters_list = fit_state['parameters_list']
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance, **init_parameters.dim
),
parameters=init_parameters,
)
evaluator = SamplerEvaluator(sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
init_state=fit_state['evaluator_state'],
)
else:
logger.info("Getting Init at {0}".format(path_to_init))
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance, **init_parameters.dim
),
parameters=init_parameters,
)
evaluator = SamplerEvaluator(sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
parameters_list = [
dict(
iteration=evaluator.iteration,
elapsed_time=evaluator.elapsed_time,
parameters=evaluator.sampler.parameters.copy()
)
]
# Save Init Figures
logger.info("Saving Init Figures")
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
# Sampler Funcs
sampler_func_names, sampler_func_kwargs = get_model_sampler_step(
model_type=model_type,
inference_method=inference_method,
steps_per_iteration=steps_per_iteration,
**kwargs
)
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(range(evaluator.iteration, max_num_iters),
file=tqdm_out, mininterval=60)
last_checkpoint_time = time.time()
last_eval_time = time.time() - eval_freq
start_time = time.time()
max_time_exceeded = False
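    # main fit loop: take sampler steps, evaluate metrics at most once per eval_freq seconds
    # (and on the final iteration), and checkpoint on the num-iters/time schedule below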
for step in p_bar:
# Execute sampler_func_names
if (time.time() - start_time > max_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_time))
max_time_exceeded = True
try:
if (time.time() - last_eval_time > eval_freq) or \
(step == max_num_iters -1) or max_time_exceeded:
evaluator.evaluate_sampler_step(
sampler_func_names, sampler_func_kwargs, evaluate=True,
)
parameters_list.append(
dict(
iteration=evaluator.iteration,
elapsed_time=evaluator.elapsed_time,
parameters=evaluator.sampler.parameters.copy()
)
)
last_eval_time=time.time()
else:
evaluator.evaluate_sampler_step(
sampler_func_names, sampler_func_kwargs, evaluate=False,
)
        except Exception:
# Checkpoint On Error
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
fit_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(
path_to_fit_state))
joblib_write_to_file(
dict(evaluator_state=fit_state,
parameters=evaluator.sampler.parameters,
parameters_list=parameters_list),
path_to_fit_state)
            raise  # re-raise the original exception after checkpointing
# Check to Checkpoint Current Results
if (step % checkpoint_num_iters == 0) or \
(time.time() - last_checkpoint_time > checkpoint_time) or \
(step == max_num_iters-1) or max_time_exceeded:
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
fit_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(
path_to_fit_state))
joblib_write_to_file(
dict(evaluator_state=fit_state,
parameters=evaluator.sampler.parameters,
parameters_list=parameters_list),
path_to_fit_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
if max_time_exceeded:
break
return evaluator
## Evaluate Module
def do_eval(target,
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
**kwargs):
logger.info("Beginning Evaluation of {0} id:{1} on {2}".format(
experiment_name, experiment_id, target,
))
Sampler, Prior = get_model_sampler_prior(model_type)
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"eval{0}".format(target))
path_to_fig = os.path.join(experiment_folder, "fig",
"eval{0}".format(target),"{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_eval_state = os.path.join(path_to_scratch,
"eval{1}_{0:0>4}_state.p".format(experiment_id, target))
make_path(path_to_out)
make_path(path_to_fig)
make_path(path_to_scratch)
# Get Data
if target in ["train", "half_avg_train"]:
path_to_data = kwargs['path_to_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
elif target in ["test", "half_avg_test"]:
path_to_data = kwargs['path_to_test_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
else:
raise ValueError("Invalid target {0}".format(target))
# Setup Sampler
logger.info("Setting up Sampler")
path_to_init = kwargs['path_to_init']
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
observations = data['observations']
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance,
**init_parameters.dim
),
)
# Set Metric + Sample Functions for Evaluator
parameter_names = ['phi', 'sigma', 'tau']
sample_functions = [sample_function_parameters(parameter_names)]
metric_functions = [noisy_logjoint_loglike_metric()]
if 'parameters' in data.keys():
metric_functions += [
metric_function_parameters(
parameter_names = parameter_names,
target_values = [getattr(data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names = ['logmse' for _ in parameter_names],
)
]
if 'latent_vars' in data.keys():
metric_functions += [metric_compare_x(true_x=data['latent_vars'])]
# Get parameters_list
logger.info("Getting Params from {0}".format(path_to_parameters_list))
parameters_list = joblib.load(path_to_parameters_list)
if target in ["half_avg_train", "half_avg_test"]:
logger.info("Calculating Running Average of Parameters")
parameters_list['parameters'] = \
half_average_parameters_list(parameters_list['parameters'])
# Setup Evaluator
logger.info("Setting up Evaluator")
# Check if existing evaluator state exists
if os.path.isfile(path_to_eval_state):
logger.info("Continuing Evaluation from {0}".format(path_to_eval_state))
eval_state = joblib.load(path_to_eval_state)
evaluator = OfflineEvaluator(sampler,
parameters_list=parameters_list,
metric_functions=metric_functions,
sample_functions=sample_functions,
init_state = eval_state,
)
else:
logger.info("Initializing Evaluation from scratch")
evaluator = OfflineEvaluator(sampler,
parameters_list=parameters_list,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
process_checkpoint(
evaluator=evaluator,
data=data,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
)
# Evaluation
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
logger.info("Found {0} parameters to eval".format(evaluator.num_to_eval()))
max_iterations = min([max_eval_iterations, evaluator.num_to_eval()])
p_bar = tqdm(range(max_iterations), file=tqdm_out, mininterval=60)
last_checkpoint_time = time.time() - checkpoint_time
start_time = time.time()
max_time_exceeded = False
for p_iter in p_bar:
if (time.time() - start_time > max_eval_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_eval_time))
max_time_exceeded = True
# Offline Evaluation
evaluator.evaluate(num_to_eval=1)
if ((time.time()-last_checkpoint_time) > checkpoint_time) or \
(p_iter == max_iterations-1) or max_time_exceeded:
process_checkpoint(
evaluator=evaluator,
data=data,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
)
eval_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(path_to_eval_state))
joblib_write_to_file(eval_state, path_to_eval_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
if max_time_exceeded:
break
return evaluator
## Combine dfs from individual experiments
def do_process_out(experiment_folder):
""" Process Output
    Aggregate metric files of the form <experiment_folder>/out/*/<experiment_id>_*.csv
"""
path_to_out = os.path.join(experiment_folder, 'out')
path_to_options = os.path.join(experiment_folder, 'in', 'options.csv')
path_to_processed = os.path.join(experiment_folder, "processed")
make_path(path_to_processed)
subfolders = os.listdir(path_to_out)
# Copy Options to processed
logger.info("Copying Options")
options_df = pd.read_csv(path_to_options, index_col=False)
pandas_write_df_to_csv(options_df,
filename=os.path.join(path_to_processed, "options.csv"),
index=False)
# Try to Aggregate Data [evaltrain+evaltest, fit_metrics[time], options]
aggregated_columns = [
'iteration', 'metric', 'value', 'variable',
'eval_set', 'time', 'iteration_time', 'experiment_id',
]
evaltargets = ['evaltrain', 'evalhalf_avg_train',
'evaltest', 'evalhalf_avg_test']
if ('fit' in subfolders) and (len(set(subfolders).intersection(
set(evaltargets))) > 0):
path_to_aggregated_df = os.path.join(path_to_processed,"aggregated.csv")
logger.info("Aggregating Data to {0}".format(path_to_aggregated_df))
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(list(enumerate(options_df['experiment_id'].unique())),
file=tqdm_out, mininterval=60)
new_csv_flag = True
for ii, experiment_id in p_bar:
eval_df = pd.DataFrame()
for evaltarget in evaltargets:
# LOAD EVAL TARGET FILE
if evaltarget in subfolders:
eval_target_file = os.path.join(path_to_out, evaltarget,
'{0}_metrics.csv'.format(experiment_id),
)
if not is_valid_file(eval_target_file):
continue
eval_target_df = pd.read_csv(
eval_target_file, index_col=False,
).assign(eval_set=evaltarget)
eval_df = pd.concat([eval_df, eval_target_df],
ignore_index=True)
# LOAD FIT FILE
fit_file = os.path.join(path_to_out, 'fit',
'{0}_metrics.csv'.format(experiment_id),
)
if not is_valid_file(fit_file):
continue
fit_df = pd.read_csv(fit_file, index_col=False)
fit_df = fit_df[fit_df['iteration'].isin(eval_df['iteration'])]
iteration_time = fit_df.query("metric == 'time'")[
['iteration', 'value']].rename(
columns={'value':'iteration_time'})
run_time = fit_df.query("metric == 'runtime'")[
['iteration', 'value']].rename(
columns={'value':'time'})
df = pd.merge(eval_df, iteration_time, how='left', on=['iteration'])
df = pd.merge(df, run_time, how='left', on=['iteration'])
df = df.sort_values('iteration').assign(experiment_id=experiment_id)
if new_csv_flag:
df[aggregated_columns].to_csv(path_to_aggregated_df,
index=False)
new_csv_flag = False
else:
df.reindex(columns=aggregated_columns).to_csv(
path_to_aggregated_df, mode='a', header=False,
index=False)
logger.info("Done Aggregating Data: {0}".format(path_to_aggregated_df))
# Also concat out folder csvs
for subfolder in subfolders:
# Only Process Folders
path_to_subfolder = os.path.join(path_to_out, subfolder)
if not os.path.isdir(path_to_subfolder):
logger.info("Ignoring file {0}".format(subfolder))
continue
logger.info("Combining Data in Folder {0}".format(path_to_subfolder))
filenames = os.listdir(path_to_subfolder)
# Combine Metrics
metric_filenames = [name for name in filenames
if name.endswith("metrics.csv")]
path_to_metric_df = os.path.join(path_to_processed,
"{0}_metrics.csv".format(subfolder))
logger.info("Aggregating Data to {0}".format(path_to_metric_df))
# Concat by appending to one large csv
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(list(enumerate(metric_filenames)), file=tqdm_out,
mininterval=60)
new_csv_flag = True
for ii, name in p_bar:
file_name = os.path.join(path_to_subfolder, name)
if not is_valid_file(file_name):
continue
metric_df = pd.read_csv(file_name, index_col=False)
metric_df['experiment_id'] = name.split("_")[0]
if new_csv_flag:
metric_df.to_csv(path_to_metric_df, index=False)
metric_df_columns = list(metric_df.columns.values)
new_csv_flag = False
else:
metric_df.reindex(columns=metric_df_columns).to_csv(
path_to_metric_df, mode='a', header=False, index=False)
logger.info("Metric Data Aggregated to {0}".format(path_to_metric_df))
return
## Make Quick Plots
def do_make_plots(experiment_folder):
""" Make quick plots based on aggregated.csv output of `do_process_out` """
path_to_processed = os.path.join(experiment_folder, 'processed')
path_to_fig = os.path.join(experiment_folder, 'fig', 'processed')
make_path(path_to_fig)
logger.info("Loading Data")
aggregated_df = pd.read_csv(
os.path.join(path_to_processed, 'aggregated.csv'))
options_df = pd.read_csv(
os.path.join(path_to_processed, 'options.csv'))
evaltargets = aggregated_df['eval_set'].unique()
logger.info("Making Plots for {0}".format(evaltargets))
for evaltarget in evaltargets:
logger.info("Processing Data for {0}".format(evaltarget))
sub_df = pd.merge(
aggregated_df[aggregated_df['eval_set'] == evaltarget],
options_df[['method_name', 'experiment_id']],
on='experiment_id',
)
sub_df['variable_metric'] = sub_df['variable'] + '_' + sub_df['metric']
logger.info("Plotting metrics vs time for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='time', y='value', hue='method_name', kind='line',
col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df,
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_time.png'.format(evaltarget)))
logger.info("Plotting metrics vs iteration for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='iteration', y='value', hue='method_name',
kind='line', col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df,
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_iteration.png'.format(evaltarget)))
## After Burnin
if sub_df.query('iteration > 100').shape[0] > 0:
logger.info("Plotting metrics vs time after burnin for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='time', y='value', hue='method_name', kind='line',
col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df.query('iteration > 100'),
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_time_burnin.png'.format(evaltarget)))
logger.info("Plotting metrics vs iteration for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='iteration', y='value', hue='method_name',
kind='line', col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df.query('iteration > 100'),
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_iteration_burnin.png'.format(evaltarget)))
return
## Evaluate Parameter Sample Quality
def do_eval_ksd(
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
ksd_burnin=0.33, ksd_subsequence_length=1000, ksd_buffer_length=10,
**kwargs):
""" Evaluate the Kernelized Stein Divergence
Pseudocode:
Load Train Data + Setup Sampler
Load Parameter Trace for Experiment Id (apply burnin)
For each parameter, calculate the gradient of the logjoint
(if using noisy gradients, take average over multiple replications)
Compute KSD at each checkpoint
    Checkpoints results to out/trace_eval_ksd for each experiment_id
"""
from sgmcmc_ssm.trace_metric_functions import compute_KSD
GRAD_DIM = 3
GRAD_VARIABLES = ['phi', 'sigma', 'tau']
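    # the kernelized Stein discrepancy scores a sample's quality from (parameter, score) pairs,
    # where the score is the gradient of the log joint; noisy gradients are averaged over passes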
logger.info("Beginning KSD Evaluation of {0} id:{1}".format(
experiment_name, experiment_id,
))
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"trace_eval_ksd")
# path_to_fig = os.path.join(experiment_folder, "fig",
# "trace_eval_ksd","{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_checkpoint_state = os.path.join(path_to_scratch,
"trace_eval_ksd_{0:0>4}_state.p".format(experiment_id))
make_path(path_to_out)
# make_path(path_to_fig)
make_path(path_to_scratch)
# Load Train Data + Setup Sampler
Sampler, Prior = get_model_sampler_prior(model_type)
path_to_data = kwargs['path_to_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
logger.info("Setting up Sampler")
path_to_init = kwargs['path_to_init']
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
observations = data['observations']
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance,
**init_parameters.dim
),
)
if not os.path.isfile(path_to_checkpoint_state):
# Load parameter_list
logger.info("Getting Params from {0}".format(path_to_parameters_list))
parameters_list = joblib.load(path_to_parameters_list).copy()
# Apply Burnin
parameters_list = parameters_list.iloc[int(parameters_list.shape[0]*ksd_burnin):]
parameters_list['num_ksd_eval'] = 0.0
parameters_list['grad'] = [np.zeros(GRAD_DIM) for _ in range(parameters_list.shape[0])]
metrics_df = pd.DataFrame()
cur_param_index = 0
logger.info("Calculating KSD on {0} parameters".format(
parameters_list.shape[0]))
else:
# Load metrics_df + parameter_list from checkpoint
logger.info("Loading parameters from previous checkpoint")
checkpoint_state = joblib.load(path_to_checkpoint_state)
parameters_list = checkpoint_state['parameters_list']
metrics_df = checkpoint_state['metrics_df']
cur_param_index = checkpoint_state['cur_param_index']
logger.info("Found {0} parameters with at least {1} evals".format(
parameters_list.shape[0], parameters_list['num_ksd_eval'].min()))
# Terminate after 1 pass if exact KSD
if (ksd_subsequence_length == -1) or \
(ksd_subsequence_length >= data['observations'].shape[0]):
if (cur_param_index == 0) and \
(parameters_list['num_ksd_eval'].min() >= 1):
logger.info("Already computed exact KSD")
return metrics_df
max_iterations = max_eval_iterations*parameters_list.shape[0]
start_time = time.time()
max_time_exceeded = False
last_checkpoint_time = time.time()
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(range(max_iterations), file=tqdm_out, mininterval=60)
for ii in p_bar:
if (time.time() - start_time > max_eval_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_eval_time))
max_time_exceeded = True
parameters = parameters_list['parameters'].iloc[cur_param_index]
sampler.parameters = parameters
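        # convert_gradient (defined elsewhere in this script) is assumed to map the sampler's
        # gradient into the flat (phi, sigma, tau) coordinates expected by the KSD kernel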
grad = convert_gradient(
gradient = sampler.noisy_gradient(
subsequence_length=ksd_subsequence_length,
buffer_length=ksd_buffer_length,
is_scaled=False),
parameters=parameters,
)
index = parameters_list.index[cur_param_index]
parameters_list.at[index,'grad'] += grad
parameters_list.at[index,'num_ksd_eval'] += 1.0
# Update parameter index for next loop
cur_param_index += 1
if cur_param_index == parameters_list.shape[0]:
logger.info("Completed {0} passes over all parameters".format(
parameters_list['num_ksd_eval'].min()))
cur_param_index = 0
# Checkpoint Results
if ((time.time() - last_checkpoint_time > checkpoint_time) or
                (cur_param_index == 0) or (ii+1 == max_iterations) or
max_time_exceeded):
# Compute KSD
sub_list = parameters_list[parameters_list['num_ksd_eval'] > 0]
param_list = sub_list['parameters']
grad_list = sub_list['grad'] / sub_list['num_ksd_eval']
result_dict = compute_KSD(
param_list=param_list.tolist(), grad_list=grad_list.tolist(),
variables=GRAD_VARIABLES,
max_block_size=512, # Block Size for computing kernel
)
new_metric_df = pd.DataFrame([
dict(metric='ksd', variable=key, value=value,
                    num_samples = sub_list.shape[0],
num_evals = parameters_list['num_ksd_eval'].min(),
) for key, value in result_dict.items()
])
metrics_df = pd.concat([metrics_df, new_metric_df],
ignore_index=True)
# Save Metrics DF to CSV
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Saving KSD metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(metrics_df, path_to_metrics_file, index=False)
# Checkpoint State
logger.info("Saving checkpoint to {0}".format(
path_to_checkpoint_state))
joblib_write_to_file(dict(
parameters_list=parameters_list,
metrics_df=metrics_df,
cur_param_index=cur_param_index,
), path_to_checkpoint_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
# Terminate after 1 pass if exact KSD
if (ksd_subsequence_length == -1) or \
(ksd_subsequence_length >= data['observations'].shape[0]):
if cur_param_index == 0:
break
# Terminate if max_time_exceeded
if max_time_exceeded:
break
return metrics_df
def do_eval_ks_test(
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
kstest_burnin=0.33, kstest_variables=None,
path_to_reference_parameter_list=None,
**kwargs):
""" Evaluate KS-Test statistic
KS-Test between
experiment_id trace (after burnin) and
reference_parameter_list
Args:
kstest_burnin (double): fraction of samples to discard as burnin
path_to_reference_parameter_list (path): path to reference_parameter_list
(loads using joblib),
if None, then uses all Gibbs samples (after burnin)
    Pseudocode:
        Load Reference Parameter Trace (by default, all Gibbs samples after burnin)
        Load Parameter Trace for Experiment Id (apply burnin)
        For each variable, compute the two-sample KS statistic between the two traces
    Saves results (and density plots) to out/trace_eval_kstest for each experiment_id
"""
from scipy.stats import ks_2samp
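    # ks_2samp returns (statistic, pvalue); a small p-value indicates the two traces are
    # unlikely to come from the same distribution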
if kstest_variables is None:
kstest_variables = ['phi', 'sigma', 'tau']
logger.info("Beginning KS Test Evaluation of {0} id:{1}".format(
experiment_name, experiment_id,
))
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"trace_eval_kstest")
path_to_fig = os.path.join(experiment_folder, "fig",
"trace_eval_kstest")
make_path(path_to_out)
make_path(path_to_fig)
# Load Reference Parameter Trace
if path_to_reference_parameter_list is None:
# Check options for path to Gibbs parameter traces
path_to_options = os.path.join(experiment_folder, 'in', 'options.p')
options_df = pd.DataFrame(joblib.load(path_to_options))
sub_df = options_df[options_df['path_to_data'] == kwargs['path_to_data']]
gibbs_options = sub_df[sub_df['inference_method'] == "Gibbs"]
path_to_traces = [
os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(row['experiment_id']))
for _, row in gibbs_options.iterrows()
]
reference_parameters_list = []
for path_to_trace in path_to_traces:
ref_param_list = joblib.load(path_to_trace)[['parameters']]
ref_param_list = ref_param_list.iloc[
int(ref_param_list.shape[0]*kstest_burnin):]
reference_parameters_list.append(ref_param_list)
reference_parameters = pd.concat(reference_parameters_list,
ignore_index=True)
# Load Experiment ID Parameter Trace
logger.info("Getting Params from {0}".format(path_to_parameters_list))
parameters_list = joblib.load(path_to_parameters_list)
# Apply Burnin
    parameters_list = parameters_list.iloc[
        int(parameters_list.shape[0]*kstest_burnin):]
parameters_list = parameters_list[['parameters']]
# Calculate KSTest for each variable
    metrics_df = pd.DataFrame()
logger.info("Calculating KS-Test on {0} parameters".format(
parameters_list.shape[0]))
results = []
plt.close('all')
fig, axes = plt.subplots(1, len(kstest_variables), sharey=False)
for ii, variable in enumerate(kstest_variables):
data_ref = np.array([getattr(param, variable)
for param in reference_parameters['parameters']]).flatten()
data_samp = np.array([getattr(param, variable)
for param in parameters_list['parameters']]).flatten()
statistic, pvalue = ks_2samp(data_samp, data_ref)
results.append(dict(metric='kstest', variable=variable,
value=statistic))
results.append(dict(metric='kstest_pvalue', variable=variable,
value=pvalue))
sns.distplot(data_ref, ax=axes[ii], label='ref')
sns.distplot(data_samp, ax=axes[ii], label='samp')
        star = '*' if pvalue < 0.05 else ''
        axes[ii].set_title('{0}\n KS-value: {1:1.2e} ({2:1.2e}{3})'.format(
            variable, statistic, pvalue, star))
axes[-1].legend()
fig.set_size_inches(4*len(kstest_variables), 7)
fig.savefig(os.path.join(path_to_fig, "{0}_trace_density.png".format(
experiment_id)))
results.append(dict(metric='num_samples', variable="trace",
value=parameters_list.shape[0]))
metrics_df = pd.DataFrame(results)
# Save Metrics DF to CSV
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Metrics:\n{0}".format(metrics_df))
logger.info("Saving KSTest metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(metrics_df, path_to_metrics_file, index=False)
return metrics_df
###############################################################################
## Experiment Specific Functions
###############################################################################
def setup_train_test_data(experiment_folder, experiment_name, T, T_test,
parameter_list, data_reps, init_methods, **kwargs):
""" Setup Synthetic Data """
# Setup Input Folder
path_to_input = os.path.join(experiment_folder, "in")
if not os.path.isdir(path_to_input):
os.makedirs(path_to_input)
# Generate Training + Test Data
logger.info("Generating Training Data + Inits")
input_args = []
# Create + Save Test Data (shared among training sets)
for param_num, (param_name, parameters) in enumerate(parameter_list.items()):
test_data = generate_lgssm_data(T=T_test, parameters=parameters)
test_data_name = "test_data"
path_to_test_data = os.path.join(path_to_input,
"{0}.p".format(test_data_name))
joblib.dump(test_data, path_to_test_data)
for data_rep in range(data_reps):
# Create + Save Training Data
train_data = generate_lgssm_data(T=T, parameters=parameters)
data_name = "train_data_{0}".format(data_rep+data_reps*param_num)
path_to_data = os.path.join(path_to_input,
"{0}.p".format(data_name))
joblib.dump(train_data, path_to_data)
# Generate Inits
for init_num, init_method in enumerate(init_methods):
logger.info("Generating Init {0} of {1}".format(
init_num, len(init_methods)))
# Create + Save Init
path_to_init = os.path.join(path_to_input,
"{0}_init_{1}.p".format(data_name, init_num))
setup_init(
data=train_data,
init_method=init_method,
path_to_init=path_to_init,
)
input_args.append({
'experiment_name': experiment_name,
'path_to_data': path_to_data,
'path_to_test_data': path_to_test_data,
'path_to_init': path_to_init,
'param_name': param_name,
'init_method': init_method,
})
return input_args
def setup_init(data, init_method, path_to_init, n=1, m=1):
""" Setup Init Parameters for data """
if init_method == "prior":
prior = LGSSMPrior.generate_default_prior(n=n, m=m)
sampler = LGSSMSampler(n=n, m=m)
sampler.setup(observations=data['observations'],
prior=prior)
sampler.project_parameters()
init_parameters = sampler.parameters
elif init_method == "truth":
init_parameters = data['parameters']
else:
raise ValueError("Unrecognized init_method")
joblib.dump(init_parameters, path_to_init)
return init_parameters
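# Example (illustrative): draw an init from the default LGSSM prior
#   init = setup_init(train_data, init_method='prior',
#                     path_to_init='scratch/init.p')  # path is hypothetical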
def setup_options(experiment_folder, arg_list):
# Create Options csv in <experiment_folder>/in
path_to_input = os.path.join(experiment_folder, "in")
if not os.path.isdir(path_to_input):
os.makedirs(path_to_input)
# Sort Arg List by Data x Init Trial
arg_list = sorted(arg_list,
key = lambda k: (k['path_to_data'], k['path_to_init']))
# Assign Experiment ID + Experiment Folder Location
for ii, custom_dict in enumerate(arg_list):
# Set Defaults
arg_dict = DEFAULT_OPTIONS.copy()
arg_dict.update(custom_dict)
arg_dict["experiment_id"] = ii
arg_dict["experiment_folder"] = experiment_folder
arg_list[ii] = arg_dict
path_to_arg_list = os.path.join(path_to_input, "options.p")
logger.info("Saving arg_list as {0}".format(path_to_arg_list))
joblib.dump(arg_list, path_to_arg_list)
options_df = pd.DataFrame(arg_list)
path_to_options_file = os.path.join(path_to_input,"options.csv")
logger.info("Also saving as csv at {0}".format(path_to_options_file))
options_df.to_csv(path_to_options_file, index=False)
return options_df
def get_model_sampler_prior(model_type):
if model_type == "LGSSM":
Sampler = LGSSMSampler
Prior = LGSSMPrior
else:
raise NotImplementedError()
return Sampler, Prior
def get_model_sampler_step(
model_type, inference_method, steps_per_iteration,
epsilon, minibatch_size, subsequence_length, buffer_length,
**kwargs):
""" Returns sampler_func_names + sampler_func_kwargs for SamplerEvaluator"""
step_kwargs = dict(
epsilon = epsilon,
minibatch_size = minibatch_size,
subsequence_length = subsequence_length,
buffer_length = buffer_length,
kind = kwargs.get("kind", "marginal"),
num_samples = kwargs.get("num_samples", None),
**kwargs.get("pf_kwargs", {})
)
if inference_method in ['SGRD', 'SGRLD']:
if 'preconditioner' not in step_kwargs.keys():
step_kwargs['preconditioner'] = LGSSMPreconditioner()
if inference_method == 'SGD':
sampler_func_names = ['step_sgd', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'ADAGRAD':
sampler_func_names = ['step_adagrad', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGRD':
sampler_func_names = ['step_sgd', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGLD':
sampler_func_names = ['sample_sgld', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGRLD':
sampler_func_names = ['sample_sgrld', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'Gibbs':
sampler_func_names = ["sample_gibbs", "project_parameters"]
sampler_func_kwargs = [{}, {}]
sampler_func_names = sampler_func_names * steps_per_iteration
sampler_func_kwargs = sampler_func_kwargs * steps_per_iteration
return sampler_func_names, sampler_func_kwargs
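# Example (illustrative): get_model_sampler_step('LGSSM', 'SGLD',
#   steps_per_iteration=2, epsilon=0.1, minibatch_size=1,
#   subsequence_length=40, buffer_length=10)
# returns (['sample_sgld', 'project_parameters'] * 2, [step_kwargs, {}] * 2),
# which SamplerEvaluator executes in order on every iteration.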
###############################################################################
## Helper / Utility Functions
def dict_product(*args):
# Combine a list of dictionary lists
from itertools import product
return [ {k:v for d in L for k,v in d.items()} for L in product(*args)]
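#   e.g. dict_product([{'a': 1}, {'a': 2}], [{'b': 3}])
#        -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]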
def update_dict(ldict, rdict):
""" Update ldict with key, value pairs from rdict """
updated_dict = ldict.copy()
updated_dict.update(rdict)
return updated_dict
def is_valid_file(filename):
# Check filename exists + is not empty
if not os.path.isfile(filename):
logging.info("Missing File {0}".format(filename))
return False
elif os.path.getsize(filename) <= 1:
# File is currently being written
logging.info("Pausing for 5.0 sec for {0}".format(filename))
time.sleep(5.0)
if os.path.getsize(filename) <= 1:
logging.info("== EMPTY File {0} ==".format(filename))
return False
else:
return True
def process_checkpoint(evaluator, data, experiment_id,
path_to_out, path_to_fig,
checkpoint_num=0, parameters_list=None,
**kwargs):
""" Save Checkpoint """
# Save Metrics
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Saving Metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(df=evaluator.get_metrics(),
filename=path_to_metrics_file, index=False)
if parameters_list is not None:
path_to_parameters_list = os.path.join(path_to_out,
"{0}_parameters.p".format(experiment_id))
logger.info("Saving Parameters List to {0}".format(
path_to_parameters_list))
joblib_write_to_file(pd.DataFrame(parameters_list),
filename=path_to_parameters_list)
if len(evaluator.metric_functions) > 0 and evaluator.metrics.shape[0] > 0:
path_to_metrics_plot = os.path.join(path_to_fig, "metrics.png")
logger.info("Plotting Metrics to {0}".format(path_to_metrics_plot))
plt.close('all')
g = plot_metrics(evaluator)
g.fig.set_size_inches(12,10)
g.savefig(path_to_metrics_plot)
if len(evaluator.metrics['iteration'].unique()) > 10:
path_to_zoom_metrics_plot = \
os.path.join(path_to_fig, "metrics_zoom.png")
logger.info("Plotting Zoom Metrics to {0}".format(
path_to_zoom_metrics_plot))
plt.close('all')
g = plot_metrics(evaluator, full_trace=False)
g.fig.set_size_inches(12,10)
g.savefig(path_to_zoom_metrics_plot)
if len(evaluator.sample_functions) > 0 and evaluator.samples.shape[0] > 0:
path_to_trace_plot = os.path.join(path_to_fig, "trace.png")
logger.info("Plotting Sample Trace to {0}".format(path_to_trace_plot))
plt.close('all')
fig, axes = plot_trace_plot(evaluator)
fig.set_size_inches(12,10)
fig.savefig(path_to_trace_plot)
if len(evaluator.samples['iteration'].unique()) > 10:
path_to_zoom_trace_plot = \
os.path.join(path_to_fig, "trace_zoom.png")
logger.info("Plotting Zoom Trace to {0}".format(
path_to_zoom_trace_plot))
plt.close('all')
fig, axes = plot_trace_plot(evaluator, full_trace=False)
fig.set_size_inches(12,10)
fig.savefig(path_to_zoom_trace_plot)
return
def convert_gradient(gradient, parameters):
""" Convert gradient w.r.t. LRinv, LQinv, C, A to gradient w.r.t phi, sigma, tau """
new_gradient = np.array([
gradient['A'], # grad w.r.t. A <-> grad w.r.t. phi
gradient['LQinv']*(-parameters.LQinv**-1), # grad w.r.t. sigma
gradient['LRinv']*(-parameters.LRinv**-1), # grad w.r.t. tau
]).flatten()
return new_gradient
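# Example (illustrative): flatten a sampler gradient for compute_KSD,
# assuming sampler.parameters has been set to the parameters of interest:
#   flat_grad = convert_gradient(
#       gradient=sampler.noisy_gradient(is_scaled=False),
#       parameters=sampler.parameters,
#   )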
###############################################################################
## Run Script ---------------------------------------------------------------
###############################################################################
if __name__=='__main__':
parser = construct_parser()
logging.info("Parsing Args")
args, extra = parser.parse_known_args()
logging.info("Args: %s", str(args))
if extra:
logging.warning("Unused Arguments: {0}".format(extra))
out = main(**vars(args))
logging.info("..Complete")
# EOF
| 57,938 | 38.657084 | 104 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/lgssm/demo_setup.py | """
Create Setup Script for Demo Experiment
Usage:
    0. Change `project_root` in this file to match the directory of `sgmcmc_ssm_code`
1. Run this script, which will create a `setup.sh` script at `<experiment_folder>`
(default is `./scratch/<experiment_name>/scripts/setup.sh`)
This will generate the train + test data, initializations + other scripts
2. Run the `fit.sh` script to fit the models specified in this file
generates output to `<experiment_folder>/out/fit`
3. Run `eval_train.sh` or `eval_test.sh` to evaluate the fits on the train or test data
4. Run `process_out.sh` to aggregate the results to `<experiment_folder>/processed/`
generates csv files that can be used to make figures
the main two csv files of interest are:
"aggregated.csv" and "options.csv" which can be joined together on experiment_id
"""
# Standard Imports
import numpy as np
import pandas as pd
import os
import sys
import joblib
from sklearn.model_selection import ParameterGrid
import logging
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
## Set Experiment Name
experiment_name = "lgssm_demo"
## Filesystem Paths
conda_env_name = None
project_root = "./" # Must be specified (path to "/sgmcmc_ssm_code/")
os.chdir(project_root)
sys.path.append(os.getcwd()) # Fix Python Path
# Paths relative to project root
current_folder = os.path.join("nonlinear_ssm_pf_experiment_scripts", "lgssm")
python_script_path = os.path.join(current_folder,"driver.py")
experiment_folder = os.path.join("scratch", experiment_name) # Path to output
# Synthetic Data Args
data_reps = 1 # Number of training sets
T = 1000 # Training set size
T_test = 1000 # Test set size
init_methods = ['prior', 'truth'] * 1 # number of initializations + how they are initialized
# LGSSM parameters
from sgmcmc_ssm.models.lgssm import (
LGSSMParameters,
)
param_name = 'A=0.9,Q=0.1,R=1'
A = np.eye(1)*0.9
Q = np.eye(1)*0.1
C = np.eye(1)
R = np.eye(1)
LQinv = np.linalg.cholesky(np.linalg.inv(Q))
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = LGSSMParameters(A=A, LQinv=LQinv, C=C, LRinv=LRinv)
parameters.project_parameters()
parameter_list = {param_name: parameters}
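# Note: LQinv and LRinv are Cholesky factors of the precisions Q^{-1} and
# R^{-1}; in this scalar case LQinv = 1/sqrt(Q) and LRinv = 1/sqrt(R).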
# Sampler Args
common_sampler_args = {
'inference_method': ['SGRLD'],
'subsequence_length': [40],
'buffer_length': [10],
'minibatch_size': [1],
'steps_per_iteration': [10],
'max_num_iters': [10000],
'max_time': [300],
'epsilon': [0.1],
}
sampler_args = [
{
'method_name': ['Gibbs'],
'inference_method': ['Gibbs'],
'epsilon': [-1],
'minibatch_size': [-1],
'subsequence_length': [-1],
'buffer_length': [-1],
'steps_per_iteration': [1],
'max_time': [300],
},
{
'method_name': ['KF'],
'kind': ['marginal'],
**common_sampler_args
},
{
'method_name': ['MC_100'],
'kind': ['complete'],
'num_samples': [100],
**common_sampler_args
},
# {
# 'method_name': ['MC_1000'],
# 'kind': ['complete'],
# 'num_samples': [1000],
# **common_sampler_args
# },
{
'method_name': ['NEMETH_100'],
'kind': ['pf'],
'pf_kwargs': [dict(pf='nemeth', N=100, lambduh=0.95)],
**common_sampler_args
},
# {
# 'method_name': ['PARIS_100'],
# 'kind': ['pf'],
# 'pf_kwargs': [dict(pf='paris', N=100, Ntilde=2)],
# **common_sampler_args
# },
# {
# 'method_name': ['POYIADJIS_N2_100'],
# 'kind': ['pf'],
# 'pf_kwargs': [dict(pf='poyiadjis_N2', N=100)],
# **common_sampler_args
# },
]
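# Note (illustrative): each dict of lists above is expanded with
# sklearn.model_selection.ParameterGrid in the main script, e.g.
#   list(ParameterGrid({'epsilon': [0.1, 0.5], 'minibatch_size': [1]}))
#   -> [{'epsilon': 0.1, 'minibatch_size': 1},
#       {'epsilon': 0.5, 'minibatch_size': 1}]
# so every combination of list values becomes one sampler configuration.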
# Script Kwargs (only really matters when using cluster)
setup_script_kwargs=dict(deploy_target='desktop')
fit_script_kwargs=dict(deploy_target='desktop')
eval_script_kwargs=dict(deploy_target='desktop')
process_out_script_kwargs=dict(deploy_target='desktop')
make_plots_script_kwargs=dict(deploy_target='desktop')
############################################################################
## MAIN SCRIPT
############################################################################
from sgmcmc_ssm.driver_utils import (
script_builder
)
if __name__ == "__main__":
# Setup Folders
logging.info("Creating Folder for {0}".format(experiment_name))
path_to_shell_script = os.path.join(experiment_folder, "scripts")
if not os.path.isdir(experiment_folder):
os.makedirs(experiment_folder)
if not os.path.isdir(path_to_shell_script):
os.makedirs(path_to_shell_script)
# Create Additional Args
path_to_additional_args = os.path.join(experiment_folder,
"setup_additional_args.p")
sampler_args = [arg
for args in sampler_args
for arg in list(ParameterGrid(args))
]
additional_args = dict(
sampler_args=sampler_args,
python_script_path=python_script_path,
path_to_shell_script=path_to_shell_script,
project_root=project_root,
experiment_name=experiment_name,
T=T,
T_test=T_test,
parameter_list=parameter_list,
data_reps=data_reps,
init_methods=init_methods,
conda_env_name=conda_env_name,
fit_script_kwargs=fit_script_kwargs,
eval_script_kwargs=eval_script_kwargs,
process_out_script_kwargs=process_out_script_kwargs,
make_plots_script_kwargs=make_plots_script_kwargs,
)
joblib.dump(additional_args, path_to_additional_args)
# Create Setup Script
bash_file_masters = script_builder(
script_name="setup",
python_script_path=python_script_path,
python_script_args=[{
"--experiment_folder": experiment_folder,
"--path_to_additional_args": path_to_additional_args,
"--setup": None,
}],
path_to_shell_script=path_to_shell_script,
project_root=project_root,
conda_env_name=conda_env_name,
**setup_script_kwargs,
)
logging.info("Run {0} to complete settting up {1}".format(
bash_file_masters[0], experiment_name))
# EOF
| 6,445 | 31.23 | 91 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/garch/driver.py | """ Experiment Driver
Call python <path_to_this_file>.py --help to see documentation
"""
import os
import sys
sys.path.append(os.getcwd()) # Fix Python Path
import numpy as np
import pandas as pd
import joblib
import time
import argparse
from tqdm import tqdm
import functools
import matplotlib
matplotlib.use('Agg') # For Cluster
import matplotlib.pyplot as plt
import seaborn as sns
import logging # For Logs
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
logger = logging.getLogger(name=__name__)
from sgmcmc_ssm.evaluator import (
SamplerEvaluator, OfflineEvaluator, half_average_parameters_list,
)
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
metric_function_parameters,
noisy_logjoint_loglike_metric,
noisy_predictive_logjoint_loglike_metric,
)
from sgmcmc_ssm.driver_utils import (
script_builder, make_path, TqdmToLogger,
pandas_write_df_to_csv, joblib_write_to_file,
)
from sgmcmc_ssm.plotting_utils import (
plot_metrics, plot_trace_plot,
)
from sgmcmc_ssm.models.garch import (
GARCHSampler,
GARCHPrior,
generate_garch_data,
)
DEFAULT_OPTIONS = dict(
model_type = "GARCH",
prior_variance = 1.0,
max_num_iters = 1000000,
max_time = 60,
eval_freq = 5,
max_eval_iterations = 1000,
max_eval_time = 60,
steps_per_iteration = 1,
checkpoint_num_iters = 1000,
checkpoint_time = 60*30,
)
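# Note: these defaults are merged into each experiment's options by
# setup_options(); any key set in a sampler/data arg dict overrides them.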
## Script Argument Parser
def construct_parser():
""" Define script argument parser """
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
)
# Key Value Args
parser.add_argument("--experiment_folder",
help="path to experiment",
type=str,
)
parser.add_argument("--experiment_id",
default=0,
help="id of experiment (optional)",
type=int,
)
parser.add_argument("--path_to_additional_args", default="",
help="additional arguments to pass to setup",
type=str,
)
# Action Args
parser.add_argument("--setup", action='store_const', const=True,
help="flag for whether to setup data, inits, and fit/eval args",
)
parser.add_argument("--fit", action='store_const', const=True,
help="flag for whether to run sampler/optimization",
)
parser.add_argument("--eval", default="",
help="run evaluation of parameters on target data (e.g. 'train', 'test', 'half_avg_train')",
type=str,
)
parser.add_argument("--trace_eval", default="",
help="run evaluation on parameter trace (e.g. 'ksd', 'kstest')",
type=str,
)
parser.add_argument("--process_out", action='store_const', const=True,
help="flag for whether to aggregate output",
)
parser.add_argument("--make_plots", action='store_const', const=True,
help="flag for whether to plot aggregated output",
)
parser.add_argument("--make_scripts", action='store_const', const=True,
help="flag for setup to only recreate scripts",
)
return parser
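# Example invocation (illustrative):
#   python nonlinear_ssm_pf_experiment_scripts/garch/driver.py \
#       --experiment_folder scratch/garch_demo --experiment_id 0 --fit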
## Main Dispatcher
def main(experiment_folder, experiment_id, path_to_additional_args,
setup, fit, eval, trace_eval, process_out, make_plots,
make_scripts, **kwargs):
""" Main Dispatcher see construct_parser for argument help """
if kwargs:
logger.warning("Unused kwargs: {0}".format(kwargs))
out = {}
if setup:
out['setup'] = do_setup(experiment_folder, path_to_additional_args)
make_scripts = True
logging.info("Extracting Options for experiment id {0}".format(
experiment_id))
path_to_arg_list = os.path.join(experiment_folder, "in", "options.p")
arg_list = joblib.load(path_to_arg_list)
experiment_options = arg_list[experiment_id]
logger.info("Experiment Options: {0}".format(experiment_options))
if make_scripts:
out['make_scripts'] = do_make_scripts(
experiment_folder, path_to_additional_args, arg_list)
if fit:
out['fit'] = do_fit(**experiment_options)
if eval != "":
for eval_ in eval.split(","):
if eval_ in ['train', 'half_avg_train', 'test', 'half_avg_test']:
out['eval_{0}'.format(eval_)] = do_eval(
target=eval_,
**experiment_options,
)
else:
raise ValueError("Unrecognized 'eval' target {0}".format(eval_))
if trace_eval != "":
for trace_eval_ in trace_eval.split(","):
if trace_eval_ == "ksd":
                out['trace_eval_{0}'.format(trace_eval_)] = do_eval_ksd(
**experiment_options,
)
elif trace_eval_ == "ess":
raise NotImplementedError()
elif trace_eval_ == "kstest":
                out['trace_eval_{0}'.format(trace_eval_)] = do_eval_ks_test(
**experiment_options,
)
else:
raise ValueError(
"Unrecognized 'trace_eval' target {0}".format(trace_eval_))
if process_out:
out['process_out'] = do_process_out(experiment_folder)
if make_plots:
out['make_plots'] = do_make_plots(experiment_folder)
if len(out.keys()) == 0:
raise ValueError("No Flags Set")
return out
## Setup Function
def do_setup(experiment_folder, path_to_additional_args):
""" Setup Shell Scripts for Experiment """
additional_args = joblib.load(path_to_additional_args)
# Setup Data
logger.info("Setting Up Data")
data_args = setup_train_test_data(experiment_folder, **additional_args)
# Setup
logger.info("Saving Experiment Options per ID")
sampler_args = additional_args['sampler_args']
arg_list = dict_product(sampler_args, data_args)
options_df = setup_options(experiment_folder, arg_list)
return options_df
## Make Scripts
def do_make_scripts(experiment_folder, path_to_additional_args, arg_list):
additional_args = joblib.load(path_to_additional_args)
options_df = pd.DataFrame(arg_list)
# Setup Shell Scripts
logger.info("Setting up Shell Scripts")
shell_args_base = [{
'--experiment_folder': experiment_folder,
'--experiment_id': experiment_id,
} for experiment_id in options_df['experiment_id']
]
# Fit Script
script_builder(
script_name = "fit",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--fit": None}) for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('fit_script_kwargs', {})
)
# Eval Scripts
script_builder(
script_name = "eval_train",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--eval": 'half_avg_train'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
script_builder(
script_name = "eval_test",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--eval": 'half_avg_test'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
script_builder(
script_name = "trace_eval",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--trace_eval": 'ksd'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
# Process Script
script_builder(
script_name = "process_out",
python_script_path = additional_args['python_script_path'],
python_script_args = [{
"--experiment_folder": experiment_folder,
"--process_out": None,
}],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('process_out_script_kwargs', {})
)
# Plot Script
script_builder(
script_name = "make_plots",
python_script_path = additional_args['python_script_path'],
python_script_args = [{
"--experiment_folder": experiment_folder,
"--make_plots": None,
}],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('make_plots_script_kwargs', {})
)
# Run All Script
path_to_runall_script = os.path.join(
additional_args['path_to_shell_script'], 'run_all.sh')
with open(path_to_runall_script, 'w') as f:
f.write("#!/bin/bash\n")
f.write("cd {0}\n".format(additional_args['project_root']))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'fit.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'eval_train.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'eval_test.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'process_out.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'make_plots.sh')))
os.chmod(path_to_runall_script, 0o775)
logger.info("Run All Script at {0}".format(path_to_runall_script))
# Clear All Script
path_to_clear_script = os.path.join(
additional_args['path_to_shell_script'], 'clear_all.sh')
with open(path_to_clear_script, 'w') as f:
f.write("#!/bin/bash\n")
f.write("cd {0}\n".format(
os.path.join(additional_args['project_root'], experiment_folder)))
f.write("rm -r ./in ./out ./scratch ./fig\n".format(os.path.basename(
path_to_additional_args)))
f.write("cd {0}\n".format(
os.path.join(additional_args['project_root'],
additional_args['path_to_shell_script'])
))
f.write("rm -r ./fit ./eval_train ./eval_test ./process_out ./make_plots ./trace_eval\n")
os.chmod(path_to_clear_script, 0o775)
logger.info("Clear Script at {0}".format(path_to_clear_script))
return options_df
## Fit Module
def do_fit(
experiment_name, experiment_id,
experiment_folder, path_to_data, path_to_init,
model_type, prior_variance,
inference_method, eval_freq,
max_num_iters, steps_per_iteration, max_time,
checkpoint_num_iters, checkpoint_time,
**kwargs):
""" Fit function
Saves list of parameters + runtimes to <experiment_folder>/out/fit/
Args:
experiment_name, experiment_id - experiment id parameters
experiment_folder, path_to_data, path_to_init - paths to input + output
model_type, prior_variance - args for get_model_sampler_prior()
inference_method - get_model_sampler_step()
eval_freq - how frequently to eval metric funcs
max_num_iters, steps_per_iteration, max_time - how long to fit/train
checkpoint_num_iters, checkpoint_time - how frequent to checkpoint
**kwargs - contains inference_method kwargs
"""
logger.info("Beginning Experiment {0} for id:{1}".format(
experiment_name, experiment_id))
Sampler, Prior = get_model_sampler_prior(model_type)
# Make Paths
path_to_out = os.path.join(experiment_folder, "out", "fit")
path_to_fig = os.path.join(experiment_folder, "fig", "fit",
"{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_fit_state = os.path.join(path_to_scratch,
"fit_{0:0>4}_state.p".format(experiment_id))
make_path(path_to_out)
make_path(path_to_fig)
make_path(path_to_scratch)
# Load Train Data
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
observations = data['observations']
# Set Metric + Sample Functions for Evaluator
parameter_names = ['alpha', 'beta', 'gamma', 'log_mu', 'logit_lambduh', 'logit_phi', 'tau']
sample_functions = [sample_function_parameters(parameter_names)]
metric_functions = []
if 'parameters' in data.keys():
metric_functions += [
metric_function_parameters(
parameter_names = parameter_names,
target_values = [getattr(data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names = ['logmse' for _ in parameter_names],
)
]
# Check if existing sampler and evaluator state exists
if os.path.isfile(path_to_fit_state):
logger.info("Continuing Evaluation from {0}".format(path_to_fit_state))
fit_state = joblib.load(path_to_fit_state)
init_parameters = fit_state['parameters']
parameters_list = fit_state['parameters_list']
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance, **init_parameters.dim
),
parameters=init_parameters,
)
evaluator = SamplerEvaluator(sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
init_state=fit_state['evaluator_state'],
)
else:
logger.info("Getting Init at {0}".format(path_to_init))
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance, **init_parameters.dim
),
parameters=init_parameters,
)
evaluator = SamplerEvaluator(sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
parameters_list = [
dict(
iteration=evaluator.iteration,
elapsed_time=evaluator.elapsed_time,
parameters=evaluator.sampler.parameters.copy()
)
]
# Save Init Figures
logger.info("Saving Init Figures")
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
# Sampler Funcs
sampler_func_names, sampler_func_kwargs = get_model_sampler_step(
model_type=model_type,
inference_method=inference_method,
steps_per_iteration=steps_per_iteration,
**kwargs
)
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(range(evaluator.iteration, max_num_iters),
file=tqdm_out, mininterval=60)
last_checkpoint_time = time.time()
last_eval_time = time.time() - eval_freq
start_time = time.time()
max_time_exceeded = False
for step in p_bar:
# Execute sampler_func_names
if (time.time() - start_time > max_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_time))
max_time_exceeded = True
try:
if (time.time() - last_eval_time > eval_freq) or \
(step == max_num_iters -1) or max_time_exceeded:
evaluator.evaluate_sampler_step(
sampler_func_names, sampler_func_kwargs, evaluate=True,
)
parameters_list.append(
dict(
iteration=evaluator.iteration,
elapsed_time=evaluator.elapsed_time,
parameters=evaluator.sampler.parameters.copy()
)
)
last_eval_time=time.time()
else:
evaluator.evaluate_sampler_step(
sampler_func_names, sampler_func_kwargs, evaluate=False,
)
except:
# Checkpoint On Error
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
fit_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(
path_to_fit_state))
joblib_write_to_file(
dict(evaluator_state=fit_state,
parameters=evaluator.sampler.parameters,
parameters_list=parameters_list),
path_to_fit_state)
raise RuntimeError()
# Check to Checkpoint Current Results
if (step % checkpoint_num_iters == 0) or \
(time.time() - last_checkpoint_time > checkpoint_time) or \
(step == max_num_iters-1) or max_time_exceeded:
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
fit_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(
path_to_fit_state))
joblib_write_to_file(
dict(evaluator_state=fit_state,
parameters=evaluator.sampler.parameters,
parameters_list=parameters_list),
path_to_fit_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
if max_time_exceeded:
break
return evaluator
## Evaluate Module
def do_eval(target,
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
**kwargs):
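    """ Offline evaluation of a saved parameter trace on `target` data
    target is one of 'train', 'test', 'half_avg_train', 'half_avg_test';
    the 'half_avg_*' targets evaluate the running half-average of the
    trace (see half_average_parameters_list).
    Saves metrics to <experiment_folder>/out/eval<target>/
    """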
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
logger.info("Beginning Evaluation of {0} id:{1} on {2}".format(
experiment_name, experiment_id, target,
))
Sampler, Prior = get_model_sampler_prior(model_type)
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"eval{0}".format(target))
path_to_fig = os.path.join(experiment_folder, "fig",
"eval{0}".format(target),"{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_eval_state = os.path.join(path_to_scratch,
"eval{1}_{0:0>4}_state.p".format(experiment_id, target))
make_path(path_to_out)
make_path(path_to_fig)
make_path(path_to_scratch)
# Get Data
if target in ["train", "half_avg_train"]:
path_to_data = kwargs['path_to_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
elif target in ["test", "half_avg_test"]:
path_to_data = kwargs['path_to_test_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
else:
raise ValueError("Invalid target {0}".format(target))
# Setup Sampler
logger.info("Setting up Sampler")
path_to_init = kwargs['path_to_init']
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
observations = data['observations']
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance,
**init_parameters.dim
),
)
# Set Metric + Sample Functions for Evaluator
parameter_names = ['alpha', 'beta', 'gamma', 'log_mu', 'logit_lambduh', 'logit_phi', 'tau']
sample_functions = [sample_function_parameters(parameter_names)]
metric_functions = [noisy_logjoint_loglike_metric(kind='pf', N=5000),
noisy_predictive_logjoint_loglike_metric(kind='pf', num_steps_ahead=5, N=5000)]
if 'parameters' in data.keys():
metric_functions += [
metric_function_parameters(
parameter_names = parameter_names,
target_values = [getattr(data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names = ['logmse' for _ in parameter_names],
)
]
# if 'latent_vars' in data.keys():
# metric_functions += [metric_compare_x(true_x=data['latent_vars'])]
# Get parameters_list
logger.info("Getting Params from {0}".format(path_to_parameters_list))
parameters_list = joblib.load(path_to_parameters_list)
if target in ["half_avg_train", "half_avg_test"]:
logger.info("Calculating Running Average of Parameters")
parameters_list['parameters'] = \
half_average_parameters_list(parameters_list['parameters'])
# Setup Evaluator
logger.info("Setting up Evaluator")
# Check if existing evaluator state exists
if os.path.isfile(path_to_eval_state):
logger.info("Continuing Evaluation from {0}".format(path_to_eval_state))
eval_state = joblib.load(path_to_eval_state)
evaluator = OfflineEvaluator(sampler,
parameters_list=parameters_list,
metric_functions=metric_functions,
sample_functions=sample_functions,
init_state = eval_state,
)
else:
logger.info("Initializing Evaluation from scratch")
evaluator = OfflineEvaluator(sampler,
parameters_list=parameters_list,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
process_checkpoint(
evaluator=evaluator,
data=data,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
)
# Evaluation
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
logger.info("Found {0} parameters to eval".format(evaluator.num_to_eval()))
max_iterations = min([max_eval_iterations, evaluator.num_to_eval()])
p_bar = tqdm(range(max_iterations), file=tqdm_out, mininterval=60)
last_checkpoint_time = time.time() - checkpoint_time
start_time = time.time()
max_time_exceeded = False
for p_iter in p_bar:
if (time.time() - start_time > max_eval_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_eval_time))
max_time_exceeded = True
# Offline Evaluation
evaluator.evaluate(num_to_eval=1)
if ((time.time()-last_checkpoint_time) > checkpoint_time) or \
(p_iter == max_iterations-1) or max_time_exceeded:
process_checkpoint(
evaluator=evaluator,
data=data,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
)
eval_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(path_to_eval_state))
joblib_write_to_file(eval_state, path_to_eval_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
if max_time_exceeded:
break
return evaluator
## Combine dfs from individual experiments
def do_process_out(experiment_folder):
""" Process Output
Aggregate files of form .../out/../{id}_{**}.csv
"""
path_to_out = os.path.join(experiment_folder, 'out')
path_to_options = os.path.join(experiment_folder, 'in', 'options.csv')
path_to_processed = os.path.join(experiment_folder, "processed")
make_path(path_to_processed)
subfolders = os.listdir(path_to_out)
# Copy Options to processed
logger.info("Copying Options")
options_df = pd.read_csv(path_to_options, index_col=False)
pandas_write_df_to_csv(options_df,
filename=os.path.join(path_to_processed, "options.csv"),
index=False)
# Try to Aggregate Data [evaltrain+evaltest, fit_metrics[time], options]
aggregated_columns = [
'iteration', 'metric', 'value', 'variable',
'eval_set', 'time', 'iteration_time', 'experiment_id',
]
evaltargets = ['evaltrain', 'evalhalf_avg_train',
'evaltest', 'evalhalf_avg_test']
if ('fit' in subfolders) and (len(set(subfolders).intersection(
set(evaltargets))) > 0):
path_to_aggregated_df = os.path.join(path_to_processed,"aggregated.csv")
logger.info("Aggregating Data to {0}".format(path_to_aggregated_df))
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(list(enumerate(options_df['experiment_id'].unique())),
file=tqdm_out, mininterval=60)
new_csv_flag = True
for ii, experiment_id in p_bar:
eval_df = pd.DataFrame()
for evaltarget in evaltargets:
# LOAD EVAL TARGET FILE
if evaltarget in subfolders:
eval_target_file = os.path.join(path_to_out, evaltarget,
'{0}_metrics.csv'.format(experiment_id),
)
if not is_valid_file(eval_target_file):
continue
eval_target_df = pd.read_csv(
eval_target_file, index_col=False,
).assign(eval_set=evaltarget)
eval_df = pd.concat([eval_df, eval_target_df],
ignore_index=True, sort=True)
# LOAD FIT FILE
fit_file = os.path.join(path_to_out, 'fit',
'{0}_metrics.csv'.format(experiment_id),
)
if not is_valid_file(fit_file):
continue
fit_df = pd.read_csv(fit_file, index_col=False)
fit_df = fit_df[fit_df['iteration'].isin(eval_df['iteration'])]
iteration_time = fit_df.query("metric == 'time'")[
['iteration', 'value']].rename(
columns={'value':'iteration_time'})
run_time = fit_df.query("metric == 'runtime'")[
['iteration', 'value']].rename(
columns={'value':'time'})
df = pd.merge(eval_df, iteration_time, how='left', on=['iteration'])
df = pd.merge(df, run_time, how='left', on=['iteration'])
df = df.sort_values('iteration').assign(experiment_id=experiment_id)
if new_csv_flag:
df[aggregated_columns].to_csv(path_to_aggregated_df,
index=False)
new_csv_flag = False
else:
df.reindex(columns=aggregated_columns).to_csv(
path_to_aggregated_df, mode='a', header=False,
index=False)
logger.info("Done Aggregating Data: {0}".format(path_to_aggregated_df))
# Also concat out folder csvs
for subfolder in subfolders:
# Only Process Folders
path_to_subfolder = os.path.join(path_to_out, subfolder)
if not os.path.isdir(path_to_subfolder):
logger.info("Ignoring file {0}".format(subfolder))
continue
logger.info("Combining Data in Folder {0}".format(path_to_subfolder))
filenames = os.listdir(path_to_subfolder)
# Combine Metrics
metric_filenames = [name for name in filenames
if name.endswith("metrics.csv")]
path_to_metric_df = os.path.join(path_to_processed,
"{0}_metrics.csv".format(subfolder))
logger.info("Aggregating Data to {0}".format(path_to_metric_df))
# Concat by appending to one large csv
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(list(enumerate(metric_filenames)), file=tqdm_out,
mininterval=60)
new_csv_flag = True
for ii, name in p_bar:
file_name = os.path.join(path_to_subfolder, name)
if not is_valid_file(file_name):
continue
metric_df = pd.read_csv(file_name, index_col=False)
metric_df['experiment_id'] = name.split("_")[0]
if new_csv_flag:
metric_df.to_csv(path_to_metric_df, index=False)
metric_df_columns = list(metric_df.columns.values)
new_csv_flag = False
else:
metric_df.reindex(columns=metric_df_columns).to_csv(
path_to_metric_df, mode='a', header=False, index=False)
logger.info("Metric Data Aggregated to {0}".format(path_to_metric_df))
return
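# Example (illustrative): join the two processed csvs on experiment_id
#   agg = pd.read_csv('processed/aggregated.csv')
#   opts = pd.read_csv('processed/options.csv')
#   df = agg.merge(opts[['experiment_id', 'method_name']], on='experiment_id')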
## Make Quick Plots
def do_make_plots(experiment_folder):
""" Make quick plots based on aggregated.csv output of `do_process_out` """
path_to_processed = os.path.join(experiment_folder, 'processed')
path_to_fig = os.path.join(experiment_folder, 'fig', 'processed')
make_path(path_to_fig)
logger.info("Loading Data")
aggregated_df = pd.read_csv(
os.path.join(path_to_processed, 'aggregated.csv'))
options_df = pd.read_csv(
os.path.join(path_to_processed, 'options.csv'))
evaltargets = aggregated_df['eval_set'].unique()
logger.info("Making Plots for {0}".format(evaltargets))
for evaltarget in evaltargets:
logger.info("Processing Data for {0}".format(evaltarget))
sub_df = pd.merge(
aggregated_df[aggregated_df['eval_set'] == evaltarget],
options_df[['method_name', 'experiment_id']],
on='experiment_id',
)
sub_df['variable_metric'] = sub_df['variable'] + '_' + sub_df['metric']
logger.info("Plotting metrics vs time for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='time', y='value', hue='method_name', kind='line',
col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df,
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_time.png'.format(evaltarget)))
logger.info("Plotting metrics vs iteration for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='iteration', y='value', hue='method_name',
kind='line', col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df,
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_iteration.png'.format(evaltarget)))
## After Burnin
if sub_df.query('iteration > 100').shape[0] > 0:
logger.info("Plotting metrics vs time after burnin for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='time', y='value', hue='method_name', kind='line',
col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df.query('iteration > 100'),
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_time_burnin.png'.format(evaltarget)))
logger.info("Plotting metrics vs iteration for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='iteration', y='value', hue='method_name',
kind='line', col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df.query('iteration > 100'),
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_iteration_burnin.png'.format(evaltarget)))
return
## Evaluate Parameter Sample Quality
def do_eval_ksd(
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
ksd_burnin=0.33, ksd_subsequence_length=1000, ksd_buffer_length=10,
**kwargs):
""" Evaluate the Kernelized Stein Divergence
Pseudocode:
Load Train Data + Setup Sampler
Load Parameter Trace for Experiment Id (apply burnin)
For each parameter, calculate the gradient of the logjoint
(if using noisy gradients, take average over multiple replications)
Compute KSD at each checkpoint
        Checkpoints results to out/trace_eval_ksd for each experiment_id
"""
from sgmcmc_ssm.trace_metric_functions import compute_KSD
GRAD_DIM = 4
GRAD_VARIABLES = ['log_mu', 'logit_phi', 'logit_lambduh', 'tau']
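    # convert_gradient (defined below) maps the sampler's gradient dict onto
    # this flat (log_mu, logit_phi, logit_lambduh, tau) ordering before the
    # KSD is computed.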
logger.info("Beginning KSD Evaluation of {0} id:{1}".format(
experiment_name, experiment_id,
))
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"trace_eval_ksd")
# path_to_fig = os.path.join(experiment_folder, "fig",
# "trace_eval_ksd","{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_checkpoint_state = os.path.join(path_to_scratch,
"trace_eval_ksd_{0:0>4}_state.p".format(experiment_id))
make_path(path_to_out)
# make_path(path_to_fig)
make_path(path_to_scratch)
# Load Train Data + Setup Sampler
Sampler, Prior = get_model_sampler_prior(model_type)
path_to_data = kwargs['path_to_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
logger.info("Setting up Sampler")
path_to_init = kwargs['path_to_init']
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
observations = data['observations']
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance,
**init_parameters.dim
),
)
if not os.path.isfile(path_to_checkpoint_state):
# Load parameter_list
logger.info("Getting Params from {0}".format(path_to_parameters_list))
parameters_list = joblib.load(path_to_parameters_list).copy()
# Apply Burnin
parameters_list = parameters_list.iloc[int(parameters_list.shape[0]*ksd_burnin):]
parameters_list['num_ksd_eval'] = 0.0
parameters_list['grad'] = [np.zeros(GRAD_DIM) for _ in range(parameters_list.shape[0])]
metrics_df = pd.DataFrame()
cur_param_index = 0
logger.info("Calculating KSD on {0} parameters".format(
parameters_list.shape[0]))
else:
# Load metrics_df + parameter_list from checkpoint
logger.info("Loading parameters from previous checkpoint")
checkpoint_state = joblib.load(path_to_checkpoint_state)
parameters_list = checkpoint_state['parameters_list']
metrics_df = checkpoint_state['metrics_df']
cur_param_index = checkpoint_state['cur_param_index']
logger.info("Found {0} parameters with at least {1} evals".format(
parameters_list.shape[0], parameters_list['num_ksd_eval'].min()))
# Terminate after 1 pass if exact KSD
if (ksd_subsequence_length == -1) or \
(ksd_subsequence_length >= data['observations'].shape[0]):
if (cur_param_index == 0) and \
(parameters_list['num_ksd_eval'].min() >= 1):
logger.info("Already computed exact KSD")
return metrics_df
max_iterations = max_eval_iterations*parameters_list.shape[0]
start_time = time.time()
max_time_exceeded = False
last_checkpoint_time = time.time()
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(range(max_iterations), file=tqdm_out, mininterval=60)
for ii in p_bar:
if (time.time() - start_time > max_eval_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_eval_time))
max_time_exceeded = True
parameters = parameters_list['parameters'].iloc[cur_param_index]
sampler.parameters = parameters
grad = convert_gradient(
gradient = sampler.noisy_gradient(
kind="pf", pf='poyiadjis_N', N=10000,
subsequence_length=ksd_subsequence_length,
buffer_length=ksd_buffer_length,
is_scaled=False),
parameters=parameters,
)
index = parameters_list.index[cur_param_index]
parameters_list.at[index,'grad'] += grad
parameters_list.at[index,'num_ksd_eval'] += 1.0
# Update parameter index for next loop
cur_param_index += 1
if cur_param_index == parameters_list.shape[0]:
logger.info("Completed {0} passes over all parameters".format(
parameters_list['num_ksd_eval'].min()))
cur_param_index = 0
if parameters_list['num_ksd_eval'].min() > 50:
logger.info("Completed more than 50 passes, Exiting Early...")
max_time_exceeded=True
# Checkpoint Results
if ((time.time() - last_checkpoint_time > checkpoint_time) or
            (cur_param_index == 0) or (ii+1 == max_iterations) or
max_time_exceeded):
# Compute KSD
sub_list = parameters_list[parameters_list['num_ksd_eval'] > 0]
param_list = sub_list['parameters']
grad_list = sub_list['grad'] / sub_list['num_ksd_eval']
result_dict = compute_KSD(
param_list=param_list.tolist(), grad_list=grad_list.tolist(),
variables=GRAD_VARIABLES,
max_block_size=512, # Block Size for computing kernel
)
new_metric_df = pd.DataFrame([
dict(metric='ksd', variable=key, value=value,
num_samples = cur_param_index-1,
num_evals = parameters_list['num_ksd_eval'].min(),
) for key, value in result_dict.items()
])
metrics_df = pd.concat([metrics_df, new_metric_df],
ignore_index=True, sort=True)
# Save Metrics DF to CSV
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Saving KSD metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(metrics_df, path_to_metrics_file, index=False)
# Checkpoint State
logger.info("Saving checkpoint to {0}".format(
path_to_checkpoint_state))
joblib_write_to_file(dict(
parameters_list=parameters_list,
metrics_df=metrics_df,
cur_param_index=cur_param_index,
), path_to_checkpoint_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
# # Terminate after 1 pass if exact KSD
# if (ksd_subsequence_length == -1) or \
# (ksd_subsequence_length >= data['observations'].shape[0]):
# if cur_param_index == 0:
# break
# Terminate if max_time_exceeded
if max_time_exceeded:
break
return metrics_df
###############################################################################
## Experiment Specific Functions
###############################################################################
def setup_train_test_data(experiment_folder, experiment_name, T, T_test,
parameter_list, data_reps, init_methods, **kwargs):
""" Setup Synthetic Data """
# Setup Input Folder
path_to_input = os.path.join(experiment_folder, "in")
if not os.path.isdir(path_to_input):
os.makedirs(path_to_input)
# Generate Training + Test Data
logger.info("Generating Training Data + Inits")
input_args = []
# Create + Save Test Data (shared among training sets)
for param_num, (param_name, parameters) in enumerate(parameter_list.items()):
logger.info("Generating Data for {0}".format(param_name))
test_data = generate_garch_data(T=T_test, parameters=parameters)
test_data_name = "test_data_{0}".format(param_num)
path_to_test_data = os.path.join(path_to_input,
"{0}.p".format(test_data_name))
joblib.dump(test_data, path_to_test_data)
for data_rep in range(data_reps):
# Create + Save Training Data
train_data = generate_garch_data(T=T, parameters=parameters)
data_name = "train_data_{0}".format(data_rep+data_reps*param_num)
path_to_data = os.path.join(path_to_input,
"{0}.p".format(data_name))
joblib.dump(train_data, path_to_data)
# Generate Inits
for init_num, init_method in enumerate(init_methods):
logger.info("Generating Init {0} of {1}".format(
init_num, len(init_methods)))
# Create + Save Init
path_to_init = os.path.join(path_to_input,
"{0}_init_{1}.p".format(data_name, init_num))
setup_init(
data=train_data,
init_method=init_method,
path_to_init=path_to_init,
**parameters.dim
)
input_args.append({
'experiment_name': experiment_name,
'path_to_data': path_to_data,
'path_to_test_data': path_to_test_data,
'path_to_init': path_to_init,
'param_name': param_name,
'init_method': init_method,
})
return input_args
def setup_init(data, init_method, path_to_init, n=1, m=1):
""" Setup Init Parameters for data """
if init_method == "prior":
prior = GARCHPrior.generate_default_prior(m=m)
init_parameters = prior.sample_prior()
init_parameters.project_parameters()
elif init_method == "truth":
init_parameters = data['parameters']
else:
raise ValueError("Unrecognized init_method")
joblib.dump(init_parameters, path_to_init)
return init_parameters
def setup_options(experiment_folder, arg_list):
# Create Options csv in <experiment_folder>/in
path_to_input = os.path.join(experiment_folder, "in")
if not os.path.isdir(path_to_input):
os.makedirs(path_to_input)
# Sort Arg List by Data x Init Trial
arg_list = sorted(arg_list,
key = lambda k: (k['path_to_data'], k['path_to_init']))
# Assign Experiment ID + Experiment Folder Location
for ii, custom_dict in enumerate(arg_list):
# Set Defaults
arg_dict = DEFAULT_OPTIONS.copy()
arg_dict.update(custom_dict)
arg_dict["experiment_id"] = ii
arg_dict["experiment_folder"] = experiment_folder
arg_list[ii] = arg_dict
path_to_arg_list = os.path.join(path_to_input, "options.p")
logger.info("Saving arg_list as {0}".format(path_to_arg_list))
joblib.dump(arg_list, path_to_arg_list)
options_df = pd.DataFrame(arg_list)
path_to_options_file = os.path.join(path_to_input,"options.csv")
logger.info("Also saving as csv at {0}".format(path_to_options_file))
options_df.to_csv(path_to_options_file, index=False)
return options_df
def get_model_sampler_prior(model_type):
if model_type == "GARCH":
Sampler = GARCHSampler
Prior = GARCHPrior
else:
raise NotImplementedError()
return Sampler, Prior
def get_model_sampler_step(
model_type, inference_method, steps_per_iteration,
epsilon, minibatch_size, subsequence_length, buffer_length,
**kwargs):
""" Returns sampler_func_names + sampler_func_kwargs for SamplerEvaluator"""
step_kwargs = dict(
epsilon = epsilon,
minibatch_size = minibatch_size,
subsequence_length = subsequence_length,
buffer_length = buffer_length,
kind = kwargs.get("kind", "marginal"),
num_samples = kwargs.get("num_samples", None),
**kwargs.get("pf_kwargs", {})
)
if inference_method in ['SGRD', 'SGRLD']:
if 'preconditioner' not in step_kwargs.keys():
raise NotImplementedError()
# step_kwargs['preconditioner'] = GARCHPreconditioner()
if inference_method == 'SGD':
sampler_func_names = ['step_sgd', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'ADAGRAD':
sampler_func_names = ['step_adagrad', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGRD':
sampler_func_names = ['step_sgd', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGLD':
sampler_func_names = ['sample_sgld', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGRLD':
sampler_func_names = ['sample_sgrld', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'Gibbs':
sampler_func_names = ["sample_gibbs", "project_parameters"]
sampler_func_kwargs = [{}, {}]
sampler_func_names = sampler_func_names * steps_per_iteration
sampler_func_kwargs = sampler_func_kwargs * steps_per_iteration
return sampler_func_names, sampler_func_kwargs
###############################################################################
## Helper / Utility Functions
def dict_product(*args):
# Combine a list of dictionary lists
from itertools import product
return [ {k:v for d in L for k,v in d.items()} for L in product(*args)]
def update_dict(ldict, rdict):
""" Update ldict with key, value pairs from rdict """
updated_dict = ldict.copy()
updated_dict.update(rdict)
return updated_dict
def is_valid_file(filename):
# Check filename exists + is not empty
if not os.path.isfile(filename):
logging.info("Missing File {0}".format(filename))
return False
elif os.path.getsize(filename) <= 1:
# File is currently being written
logging.info("Pausing for 5.0 sec for {0}".format(filename))
time.sleep(5.0)
if os.path.getsize(filename) <= 1:
logging.info("== EMPTY File {0} ==".format(filename))
return False
else:
return True
def process_checkpoint(evaluator, data, experiment_id,
path_to_out, path_to_fig,
checkpoint_num=0, parameters_list=None,
**kwargs):
""" Save Checkpoint """
# Save Metrics
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Saving Metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(df=evaluator.get_metrics(),
filename=path_to_metrics_file, index=False)
if parameters_list is not None:
path_to_parameters_list = os.path.join(path_to_out,
"{0}_parameters.p".format(experiment_id))
logger.info("Saving Parameters List to {0}".format(
path_to_parameters_list))
joblib_write_to_file(pd.DataFrame(parameters_list),
filename=path_to_parameters_list)
if len(evaluator.metric_functions) > 0 and evaluator.metrics.shape[0] > 0:
path_to_metrics_plot = os.path.join(path_to_fig, "metrics.png")
logger.info("Plotting Metrics to {0}".format(path_to_metrics_plot))
plt.close('all')
g = plot_metrics(evaluator)
g.fig.set_size_inches(12,10)
g.savefig(path_to_metrics_plot)
if len(evaluator.metrics['iteration'].unique()) > 10:
path_to_zoom_metrics_plot = \
os.path.join(path_to_fig, "metrics_zoom.png")
logger.info("Plotting Zoom Metrics to {0}".format(
path_to_zoom_metrics_plot))
plt.close('all')
g = plot_metrics(evaluator, full_trace=False)
g.fig.set_size_inches(12,10)
g.savefig(path_to_zoom_metrics_plot)
if len(evaluator.sample_functions) > 0 and evaluator.samples.shape[0] > 0:
path_to_trace_plot = os.path.join(path_to_fig, "trace.png")
logger.info("Plotting Sample Trace to {0}".format(path_to_trace_plot))
plt.close('all')
fig, axes = plot_trace_plot(evaluator)
fig.set_size_inches(12,10)
fig.savefig(path_to_trace_plot)
if len(evaluator.samples['iteration'].unique()) > 10:
path_to_zoom_trace_plot = \
os.path.join(path_to_fig, "trace_zoom.png")
logger.info("Plotting Zoom Trace to {0}".format(
path_to_zoom_trace_plot))
plt.close('all')
fig, axes = plot_trace_plot(evaluator, full_trace=False)
fig.set_size_inches(12,10)
fig.savefig(path_to_zoom_trace_plot)
return
def convert_gradient(gradient, parameters):
    """ Convert gradient w.r.t. (log_mu, logit_phi, logit_lambduh, LRinv)
    to gradient w.r.t. (log_mu, logit_phi, logit_lambduh, tau) """
    new_gradient = np.array([
        gradient['log_mu'],
        gradient['logit_phi'],
        gradient['logit_lambduh'],
        # .item() replaces the deprecated np.asscalar
        gradient['LRinv_vec']*(-np.asarray(parameters.LRinv).item()**-1), # grad w.r.t. tau
        ]).flatten()
    return new_gradient
###############################################################################
## Run Script ---------------------------------------------------------------
###############################################################################
if __name__ == '__main__':
parser = construct_parser()
logging.info("Parsing Args")
args, extra = parser.parse_known_args()
logging.info("Args: %s", str(args))
if extra:
logging.warning("Unused Arguments: {0}".format(extra))
out = main(**vars(args))
logging.info("..Complete")
# EOF
| 53,197 | 38.729649 | 104 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/garch/demo_setup.py | """
Create Setup Script for Demo Experiment
Usage:
0. Change `project_root` in this file to point to the `sgmcmc_ssm_code` directory
1. Run this script, which will create a `setup.sh` script at `<experiment_folder>`
(default is `./scratch/<experiment_name>/scripts/setup.sh`)
This will generate the train + test data, initializations + other scripts
2. Run the `fit.sh` script to fit the models specified in this file
generates output to `<experiment_folder>/out/fit`
3. Run `eval_train.sh` or `eval_test.sh` to evaluate the fits on the train or test data
4. Run `process_out.sh` to aggregate the results to `<experiment_folder>/processed/`
generates csv files that can be used to make figures
the main two csv files of interest are:
"aggregated.csv" and "options.csv" which can be joined together on experiment_id
"""
# Standard Imports
import numpy as np
import pandas as pd
import os
import sys
import joblib
from sklearn.model_selection import ParameterGrid
import logging
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
## Set Experiment Name
experiment_name = "garch_demo"
## Filesystem Paths
conda_env_name = None
project_root = "./" # Must be specified (path to "/sgmcmc_ssm_code")
os.chdir(project_root)
sys.path.append(os.getcwd()) # Fix Python Path
# Paths relative to project root
current_folder = os.path.join("nonlinear_ssm_pf_experiment_scripts", "garch")
python_script_path = os.path.join(current_folder,"driver.py")
experiment_folder = os.path.join("scratch", experiment_name) # Path to output
# Synthetic Data Args
data_reps = 1 # Number of training sets
T = 1000 # Training set size
T_test = 1000 # Test set size
init_methods = ['prior', 'truth'] * 1 # number of initializations + how they are initialized
# GARCH parameters
from sgmcmc_ssm.models.garch import (
GARCHParameters,
)
param_name = 'default'
alpha = 0.1
beta = 0.8
gamma = 0.05
R = np.eye(1)*0.3
log_mu, logit_phi, logit_lambduh = \
GARCHParameters.convert_alpha_beta_gamma(alpha, beta, gamma)
LRinv = np.linalg.cholesky(np.linalg.inv(R))
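# Sanity check (added): R is 1x1 here, so the Cholesky factor of inv(R)
# reduces to LRinv = sqrt(1/0.3) ~= 1.826, i.e. the inverse of the
# observation noise scale sqrt(R).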
parameters = GARCHParameters(
log_mu=log_mu,
logit_phi=logit_phi,
logit_lambduh=logit_lambduh,
LRinv=LRinv,
)
parameters.project_parameters()
parameter_list = {param_name: parameters}
# Sampler Args
common_sampler_args = {
'inference_method': ['SGLD'],
'subsequence_length': [40],
'buffer_length': [10],
'minibatch_size': [1],
'steps_per_iteration': [10],
'max_num_iters': [10000],
'max_time': [300],
'epsilon': [0.01],
}
sampler_args = [
{
'method_name': ['POYIADJIS_N_1000'],
'kind': ['pf'],
'pf_kwargs': [dict(pf='poyiadjis_N', N=1000)],
**common_sampler_args
},
{
'method_name': ['NEMETH_1000'],
'kind': ['pf'],
'pf_kwargs': [dict(pf='nemeth', N=1000, lambduh=0.95)],
**common_sampler_args
},
{
'method_name': ['PARIS_100'],
'kind': ['pf'],
'pf_kwargs': [dict(pf='paris', N=100, Ntilde=2)],
**common_sampler_args
},
# {
# 'method_name': ['POYIADJIS_N2_100'],
# 'kind': ['pf'],
# 'pf_kwargs': [dict(pf='poyiadjis_N2', N=100)],
# **common_sampler_args
# },
]
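# Note (added): each dict above is expanded below with sklearn's
# ParameterGrid, which takes the cross product over the list-valued entries.
# A hedged mini example:
#   from sklearn.model_selection import ParameterGrid
#   list(ParameterGrid({'a': [1, 2], 'b': ['x']}))
#   # -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'x'}]
# so each sampler config becomes one experiment per combination.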
# Script Kwargs (only really matters when using cluster)
setup_script_kwargs=dict(deploy_target='desktop')
fit_script_kwargs=dict(deploy_target='desktop')
eval_script_kwargs=dict(deploy_target='desktop')
process_out_script_kwargs=dict(deploy_target='desktop')
make_plots_script_kwargs=dict(deploy_target='desktop')
############################################################################
## MAIN SCRIPT
############################################################################
from sgmcmc_ssm.driver_utils import (
script_builder
)
if __name__ == "__main__":
# Setup Folders
logging.info("Creating Folder for {0}".format(experiment_name))
path_to_shell_script = os.path.join(experiment_folder, "scripts")
if not os.path.isdir(experiment_folder):
os.makedirs(experiment_folder)
if not os.path.isdir(path_to_shell_script):
os.makedirs(path_to_shell_script)
# Create Additional Args
path_to_additional_args = os.path.join(experiment_folder,
"setup_additional_args.p")
sampler_args = [arg
for args in sampler_args
for arg in list(ParameterGrid(args))
]
additional_args = dict(
sampler_args=sampler_args,
python_script_path=python_script_path,
path_to_shell_script=path_to_shell_script,
project_root=project_root,
experiment_name=experiment_name,
T=T,
T_test=T_test,
parameter_list=parameter_list,
data_reps=data_reps,
init_methods=init_methods,
conda_env_name=conda_env_name,
fit_script_kwargs=fit_script_kwargs,
eval_script_kwargs=eval_script_kwargs,
process_out_script_kwargs=process_out_script_kwargs,
make_plots_script_kwargs=make_plots_script_kwargs,
)
joblib.dump(additional_args, path_to_additional_args)
# Create Setup Script
bash_file_masters = script_builder(
script_name="setup",
python_script_path=python_script_path,
python_script_args=[{
"--experiment_folder": experiment_folder,
"--path_to_additional_args": path_to_additional_args,
"--setup": None,
}],
path_to_shell_script=path_to_shell_script,
project_root=project_root,
conda_env_name=conda_env_name,
**setup_script_kwargs,
)
logging.info("Run {0} to complete settting up {1}".format(
bash_file_masters[0], experiment_name))
# EOF
| 6,074 | 31.837838 | 91 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/svm/driver.py | """ Experiment Driver
Call python <path_to_this_file>.py --help to see documentation
"""
import os
import sys
sys.path.append(os.getcwd()) # Fix Python Path
import numpy as np
import pandas as pd
import joblib
import time
import argparse
from tqdm import tqdm
import functools
import matplotlib
matplotlib.use('Agg') # For Cluster
import matplotlib.pyplot as plt
import seaborn as sns
import logging # For Logs
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
logger = logging.getLogger(name=__name__)
from sgmcmc_ssm.evaluator import (
SamplerEvaluator, OfflineEvaluator, half_average_parameters_list,
)
from sgmcmc_ssm.metric_functions import (
    sample_function_parameters,
    metric_function_parameters,
    noisy_logjoint_loglike_metric,
    noisy_predictive_logjoint_loglike_metric,  # used in do_eval below
)
from sgmcmc_ssm.driver_utils import (
script_builder, make_path, TqdmToLogger,
pandas_write_df_to_csv, joblib_write_to_file,
)
from sgmcmc_ssm.plotting_utils import (
plot_metrics, plot_trace_plot,
)
from sgmcmc_ssm.models.svm import (
SVMSampler,
SVMPrior,
generate_svm_data,
)
DEFAULT_OPTIONS = dict(
model_type = "SVM",
prior_variance = 100.0,
max_num_iters = 1000000,
max_time = 60,
eval_freq = 5,
max_eval_iterations = 1000,
max_eval_time = 60,
steps_per_iteration = 1,
checkpoint_num_iters = 1000,
checkpoint_time = 60*30,
)
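# Note (added): these are fallback values only. setup_options() below does
# arg_dict = DEFAULT_OPTIONS.copy(); arg_dict.update(custom_dict), so any key
# supplied via the sampler/data args (e.g. max_time) overrides the default.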
## Script Argument Parser
def construct_parser():
""" Define script argument parser """
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
)
# Key Value Args
parser.add_argument("--experiment_folder",
help="path to experiment",
type=str,
)
parser.add_argument("--experiment_id",
default=0,
help="id of experiment (optional)",
type=int,
)
parser.add_argument("--path_to_additional_args", default="",
help="additional arguments to pass to setup",
type=str,
)
# Action Args
parser.add_argument("--setup", action='store_const', const=True,
help="flag for whether to setup data, inits, and fit/eval args",
)
parser.add_argument("--fit", action='store_const', const=True,
help="flag for whether to run sampler/optimization",
)
parser.add_argument("--eval", default="",
help="run evaluation of parameters on target data (e.g. 'train', 'test', 'half_avg_train')",
type=str,
)
parser.add_argument("--trace_eval", default="",
help="run evaluation on parameter trace (e.g. 'ksd', 'kstest')",
type=str,
)
parser.add_argument("--process_out", action='store_const', const=True,
help="flag for whether to aggregate output",
)
parser.add_argument("--make_plots", action='store_const', const=True,
help="flag for whether to plot aggregated output",
)
parser.add_argument("--make_scripts", action='store_const', const=True,
help="flag for setup to only recreate scripts",
)
return parser
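# Hedged CLI examples (added; the flags are defined above, the paths are
# illustrative and assume a prior setup run):
#   python nonlinear_ssm_pf_experiment_scripts/svm/driver.py \
#       --experiment_folder scratch/svm_demo --experiment_id 0 --fit
#   python nonlinear_ssm_pf_experiment_scripts/svm/driver.py \
#       --experiment_folder scratch/svm_demo --experiment_id 0 \
#       --eval half_avg_train,half_avg_test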
## Main Dispatcher
def main(experiment_folder, experiment_id, path_to_additional_args,
setup, fit, eval, trace_eval, process_out, make_plots,
make_scripts, **kwargs):
""" Main Dispatcher see construct_parser for argument help """
if kwargs:
logger.warning("Unused kwargs: {0}".format(kwargs))
out = {}
if setup:
out['setup'] = do_setup(experiment_folder, path_to_additional_args)
make_scripts = True
logging.info("Extracting Options for experiment id {0}".format(
experiment_id))
path_to_arg_list = os.path.join(experiment_folder, "in", "options.p")
arg_list = joblib.load(path_to_arg_list)
experiment_options = arg_list[experiment_id]
logger.info("Experiment Options: {0}".format(experiment_options))
if make_scripts:
out['make_scripts'] = do_make_scripts(
experiment_folder, path_to_additional_args, arg_list)
if fit:
out['fit'] = do_fit(**experiment_options)
if eval != "":
for eval_ in eval.split(","):
if eval_ in ['train', 'half_avg_train', 'test', 'half_avg_test']:
out['eval_{0}'.format(eval_)] = do_eval(
target=eval_,
**experiment_options,
)
else:
raise ValueError("Unrecognized 'eval' target {0}".format(eval_))
if trace_eval != "":
for trace_eval_ in trace_eval.split(","):
if trace_eval_ == "ksd":
out['trace_eval_{0}'.format(trace_eval)] = do_eval_ksd(
**experiment_options,
)
elif trace_eval_ == "ess":
raise NotImplementedError()
elif trace_eval_ == "kstest":
out['trace_eval_{0}'.format(trace_eval)] = do_eval_ks_test(
**experiment_options,
)
else:
raise ValueError(
"Unrecognized 'trace_eval' target {0}".format(trace_eval_))
if process_out:
out['process_out'] = do_process_out(experiment_folder)
if make_plots:
out['make_plots'] = do_make_plots(experiment_folder)
if len(out.keys()) == 0:
raise ValueError("No Flags Set")
return out
## Setup Function
def do_setup(experiment_folder, path_to_additional_args):
""" Setup Shell Scripts for Experiment """
additional_args = joblib.load(path_to_additional_args)
# Setup Data
logger.info("Setting Up Data")
data_args = setup_train_test_data(experiment_folder, **additional_args)
# Setup
logger.info("Saving Experiment Options per ID")
sampler_args = additional_args['sampler_args']
arg_list = dict_product(sampler_args, data_args)
options_df = setup_options(experiment_folder, arg_list)
return options_df
## Make Scripts
def do_make_scripts(experiment_folder, path_to_additional_args, arg_list):
additional_args = joblib.load(path_to_additional_args)
options_df = pd.DataFrame(arg_list)
# Setup Shell Scripts
logger.info("Setting up Shell Scripts")
shell_args_base = [{
'--experiment_folder': experiment_folder,
'--experiment_id': experiment_id,
} for experiment_id in options_df['experiment_id']
]
# Fit Script
script_builder(
script_name = "fit",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--fit": None}) for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('fit_script_kwargs', {})
)
# Eval Scripts
script_builder(
script_name = "eval_train",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--eval": 'half_avg_train'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
script_builder(
script_name = "eval_test",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--eval": 'half_avg_test'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
script_builder(
script_name = "trace_eval",
python_script_path = additional_args['python_script_path'],
python_script_args = \
[update_dict(args, {"--trace_eval": 'ksd'})
for args in shell_args_base],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('eval_script_kwargs', {})
)
# Process Script
script_builder(
script_name = "process_out",
python_script_path = additional_args['python_script_path'],
python_script_args = [{
"--experiment_folder": experiment_folder,
"--process_out": None,
}],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('process_out_script_kwargs', {})
)
# Plot Script
script_builder(
script_name = "make_plots",
python_script_path = additional_args['python_script_path'],
python_script_args = [{
"--experiment_folder": experiment_folder,
"--make_plots": None,
}],
path_to_shell_script = additional_args['path_to_shell_script'],
project_root = additional_args['project_root'],
conda_env_name = additional_args.get('conda_env_name', None),
**additional_args.get('make_plots_script_kwargs', {})
)
# Run All Script
path_to_runall_script = os.path.join(
additional_args['path_to_shell_script'], 'run_all.sh')
with open(path_to_runall_script, 'w') as f:
f.write("#!/bin/bash\n")
f.write("cd {0}\n".format(additional_args['project_root']))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'fit.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'eval_train.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'eval_test.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'process_out.sh')))
f.write("{0}\n".format(os.path.join(
additional_args['path_to_shell_script'],'make_plots.sh')))
os.chmod(path_to_runall_script, 0o775)
logger.info("Run All Script at {0}".format(path_to_runall_script))
# Clear All Script
path_to_clear_script = os.path.join(
additional_args['path_to_shell_script'], 'clear_all.sh')
with open(path_to_clear_script, 'w') as f:
f.write("#!/bin/bash\n")
f.write("cd {0}\n".format(
os.path.join(additional_args['project_root'], experiment_folder)))
f.write("rm -r ./in ./out ./scratch ./fig\n".format(os.path.basename(
path_to_additional_args)))
f.write("cd {0}\n".format(
os.path.join(additional_args['project_root'],
additional_args['path_to_shell_script'])
))
f.write("rm -r ./fit ./eval_train ./eval_test ./process_out ./make_plots ./trace_eval\n")
os.chmod(path_to_clear_script, 0o775)
logger.info("Clear Script at {0}".format(path_to_clear_script))
return options_df
## Fit Module
def do_fit(
experiment_name, experiment_id,
experiment_folder, path_to_data, path_to_init,
model_type, prior_variance,
inference_method, eval_freq,
max_num_iters, steps_per_iteration, max_time,
checkpoint_num_iters, checkpoint_time,
**kwargs):
""" Fit function
Saves list of parameters + runtimes to <experiment_folder>/out/fit/
Args:
experiment_name, experiment_id - experiment id parameters
experiment_folder, path_to_data, path_to_init - paths to input + output
model_type, prior_variance - args for get_model_sampler_prior()
inference_method - get_model_sampler_step()
eval_freq - how frequently to eval metric funcs
max_num_iters, steps_per_iteration, max_time - how long to fit/train
checkpoint_num_iters, checkpoint_time - how frequent to checkpoint
**kwargs - contains inference_method kwargs
"""
logger.info("Beginning Experiment {0} for id:{1}".format(
experiment_name, experiment_id))
Sampler, Prior = get_model_sampler_prior(model_type)
# Make Paths
path_to_out = os.path.join(experiment_folder, "out", "fit")
path_to_fig = os.path.join(experiment_folder, "fig", "fit",
"{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_fit_state = os.path.join(path_to_scratch,
"fit_{0:0>4}_state.p".format(experiment_id))
make_path(path_to_out)
make_path(path_to_fig)
make_path(path_to_scratch)
# Load Train Data
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
observations = data['observations']
# Set Metric + Sample Functions for Evaluator
parameter_names = ['phi', 'sigma', 'tau']
sample_functions = [sample_function_parameters(parameter_names)]
metric_functions = []
if 'parameters' in data.keys():
metric_functions += [
metric_function_parameters(
parameter_names = parameter_names,
target_values = [getattr(data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names = ['logmse' for _ in parameter_names],
)
]
# Check if existing sampler and evaluator state exists
if os.path.isfile(path_to_fit_state):
logger.info("Continuing Evaluation from {0}".format(path_to_fit_state))
fit_state = joblib.load(path_to_fit_state)
init_parameters = fit_state['parameters']
parameters_list = fit_state['parameters_list']
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance, **init_parameters.dim
),
parameters=init_parameters,
)
evaluator = SamplerEvaluator(sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
init_state=fit_state['evaluator_state'],
)
else:
logger.info("Getting Init at {0}".format(path_to_init))
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance, **init_parameters.dim
),
parameters=init_parameters,
)
evaluator = SamplerEvaluator(sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
parameters_list = [
dict(
iteration=evaluator.iteration,
elapsed_time=evaluator.elapsed_time,
parameters=evaluator.sampler.parameters.copy()
)
]
# Save Init Figures
logger.info("Saving Init Figures")
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
# Sampler Funcs
sampler_func_names, sampler_func_kwargs = get_model_sampler_step(
model_type=model_type,
inference_method=inference_method,
steps_per_iteration=steps_per_iteration,
**kwargs
)
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(range(evaluator.iteration, max_num_iters),
file=tqdm_out, mininterval=60)
last_checkpoint_time = time.time()
last_eval_time = time.time() - eval_freq
start_time = time.time()
max_time_exceeded = False
for step in p_bar:
# Execute sampler_func_names
if (time.time() - start_time > max_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_time))
max_time_exceeded = True
try:
if (time.time() - last_eval_time > eval_freq) or \
(step == max_num_iters -1) or max_time_exceeded:
evaluator.evaluate_sampler_step(
sampler_func_names, sampler_func_kwargs, evaluate=True,
)
parameters_list.append(
dict(
iteration=evaluator.iteration,
elapsed_time=evaluator.elapsed_time,
parameters=evaluator.sampler.parameters.copy()
)
)
last_eval_time=time.time()
else:
evaluator.evaluate_sampler_step(
sampler_func_names, sampler_func_kwargs, evaluate=False,
)
        except Exception:
# Checkpoint On Error
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
fit_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(
path_to_fit_state))
joblib_write_to_file(
dict(evaluator_state=fit_state,
parameters=evaluator.sampler.parameters,
parameters_list=parameters_list),
path_to_fit_state)
            raise  # re-raise the original error after checkpointing
# Check to Checkpoint Current Results
if (step % checkpoint_num_iters == 0) or \
(time.time() - last_checkpoint_time > checkpoint_time) or \
(step == max_num_iters-1) or max_time_exceeded:
process_checkpoint(
evaluator=evaluator,
data=data,
parameters_list=parameters_list,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
checkpoint_num=evaluator.iteration,
)
fit_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(
path_to_fit_state))
joblib_write_to_file(
dict(evaluator_state=fit_state,
parameters=evaluator.sampler.parameters,
parameters_list=parameters_list),
path_to_fit_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
if max_time_exceeded:
break
return evaluator
## Evaluate Module
def do_eval(target,
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
**kwargs):
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
logger.info("Beginning Evaluation of {0} id:{1} on {2}".format(
experiment_name, experiment_id, target,
))
Sampler, Prior = get_model_sampler_prior(model_type)
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"eval{0}".format(target))
path_to_fig = os.path.join(experiment_folder, "fig",
"eval{0}".format(target),"{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_eval_state = os.path.join(path_to_scratch,
"eval{1}_{0:0>4}_state.p".format(experiment_id, target))
make_path(path_to_out)
make_path(path_to_fig)
make_path(path_to_scratch)
# Get Data
if target in ["train", "half_avg_train"]:
path_to_data = kwargs['path_to_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
elif target in ["test", "half_avg_test"]:
path_to_data = kwargs['path_to_test_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
else:
raise ValueError("Invalid target {0}".format(target))
# Setup Sampler
logger.info("Setting up Sampler")
path_to_init = kwargs['path_to_init']
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
observations = data['observations']
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance,
**init_parameters.dim
),
)
# Set Metric + Sample Functions for Evaluator
parameter_names = ['phi', 'sigma', 'tau']
sample_functions = [sample_function_parameters(parameter_names)]
metric_functions = [noisy_logjoint_loglike_metric(kind='pf', N=5000),
noisy_predictive_logjoint_loglike_metric(kind='pf', num_steps_ahead=5, N=5000)]
if 'parameters' in data.keys():
metric_functions += [
metric_function_parameters(
parameter_names = parameter_names,
target_values = [getattr(data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names = ['logmse' for _ in parameter_names],
)
]
# if 'latent_vars' in data.keys():
# metric_functions += [metric_compare_x(true_x=data['latent_vars'])]
# Get parameters_list
logger.info("Getting Params from {0}".format(path_to_parameters_list))
parameters_list = joblib.load(path_to_parameters_list)
if target in ["half_avg_train", "half_avg_test"]:
logger.info("Calculating Running Average of Parameters")
parameters_list['parameters'] = \
half_average_parameters_list(parameters_list['parameters'])
# Setup Evaluator
logger.info("Setting up Evaluator")
# Check if existing evaluator state exists
if os.path.isfile(path_to_eval_state):
logger.info("Continuing Evaluation from {0}".format(path_to_eval_state))
eval_state = joblib.load(path_to_eval_state)
evaluator = OfflineEvaluator(sampler,
parameters_list=parameters_list,
metric_functions=metric_functions,
sample_functions=sample_functions,
init_state = eval_state,
)
else:
logger.info("Initializing Evaluation from scratch")
evaluator = OfflineEvaluator(sampler,
parameters_list=parameters_list,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
process_checkpoint(
evaluator=evaluator,
data=data,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
)
# Evaluation
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
logger.info("Found {0} parameters to eval".format(evaluator.num_to_eval()))
max_iterations = min([max_eval_iterations, evaluator.num_to_eval()])
p_bar = tqdm(range(max_iterations), file=tqdm_out, mininterval=60)
last_checkpoint_time = time.time() - checkpoint_time
start_time = time.time()
max_time_exceeded = False
for p_iter in p_bar:
if (time.time() - start_time > max_eval_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_eval_time))
max_time_exceeded = True
# Offline Evaluation
evaluator.evaluate(num_to_eval=1)
if ((time.time()-last_checkpoint_time) > checkpoint_time) or \
(p_iter == max_iterations-1) or max_time_exceeded:
process_checkpoint(
evaluator=evaluator,
data=data,
experiment_id=experiment_id,
path_to_out=path_to_out,
path_to_fig=path_to_fig,
)
eval_state = evaluator.get_state()
logger.info("Saving Evaluator State to {0}".format(path_to_eval_state))
joblib_write_to_file(eval_state, path_to_eval_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
if max_time_exceeded:
break
return evaluator
## Combine dfs from individual experiments
def do_process_out(experiment_folder):
""" Process Output
Aggregate files of form .../out/../{id}_{**}.csv
"""
path_to_out = os.path.join(experiment_folder, 'out')
path_to_options = os.path.join(experiment_folder, 'in', 'options.csv')
path_to_processed = os.path.join(experiment_folder, "processed")
make_path(path_to_processed)
subfolders = os.listdir(path_to_out)
# Copy Options to processed
logger.info("Copying Options")
options_df = pd.read_csv(path_to_options, index_col=False)
pandas_write_df_to_csv(options_df,
filename=os.path.join(path_to_processed, "options.csv"),
index=False)
# Try to Aggregate Data [evaltrain+evaltest, fit_metrics[time], options]
aggregated_columns = [
'iteration', 'metric', 'value', 'variable',
'eval_set', 'time', 'iteration_time', 'experiment_id',
]
evaltargets = ['evaltrain', 'evalhalf_avg_train',
'evaltest', 'evalhalf_avg_test']
if ('fit' in subfolders) and (len(set(subfolders).intersection(
set(evaltargets))) > 0):
path_to_aggregated_df = os.path.join(path_to_processed,"aggregated.csv")
logger.info("Aggregating Data to {0}".format(path_to_aggregated_df))
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(list(enumerate(options_df['experiment_id'].unique())),
file=tqdm_out, mininterval=60)
new_csv_flag = True
for ii, experiment_id in p_bar:
eval_df = pd.DataFrame()
for evaltarget in evaltargets:
# LOAD EVAL TARGET FILE
if evaltarget in subfolders:
eval_target_file = os.path.join(path_to_out, evaltarget,
'{0}_metrics.csv'.format(experiment_id),
)
if not is_valid_file(eval_target_file):
continue
eval_target_df = pd.read_csv(
eval_target_file, index_col=False,
).assign(eval_set=evaltarget)
eval_df = pd.concat([eval_df, eval_target_df],
ignore_index=True, sort=True)
# LOAD FIT FILE
fit_file = os.path.join(path_to_out, 'fit',
'{0}_metrics.csv'.format(experiment_id),
)
if not is_valid_file(fit_file):
continue
fit_df = pd.read_csv(fit_file, index_col=False)
fit_df = fit_df[fit_df['iteration'].isin(eval_df['iteration'])]
iteration_time = fit_df.query("metric == 'time'")[
['iteration', 'value']].rename(
columns={'value':'iteration_time'})
run_time = fit_df.query("metric == 'runtime'")[
['iteration', 'value']].rename(
columns={'value':'time'})
df = pd.merge(eval_df, iteration_time, how='left', on=['iteration'])
df = pd.merge(df, run_time, how='left', on=['iteration'])
df = df.sort_values('iteration').assign(experiment_id=experiment_id)
if new_csv_flag:
df[aggregated_columns].to_csv(path_to_aggregated_df,
index=False)
new_csv_flag = False
else:
df.reindex(columns=aggregated_columns).to_csv(
path_to_aggregated_df, mode='a', header=False,
index=False)
logger.info("Done Aggregating Data: {0}".format(path_to_aggregated_df))
# Also concat out folder csvs
for subfolder in subfolders:
# Only Process Folders
path_to_subfolder = os.path.join(path_to_out, subfolder)
if not os.path.isdir(path_to_subfolder):
logger.info("Ignoring file {0}".format(subfolder))
continue
logger.info("Combining Data in Folder {0}".format(path_to_subfolder))
filenames = os.listdir(path_to_subfolder)
# Combine Metrics
metric_filenames = [name for name in filenames
if name.endswith("metrics.csv")]
path_to_metric_df = os.path.join(path_to_processed,
"{0}_metrics.csv".format(subfolder))
logger.info("Aggregating Data to {0}".format(path_to_metric_df))
# Concat by appending to one large csv
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
p_bar = tqdm(list(enumerate(metric_filenames)), file=tqdm_out,
mininterval=60)
new_csv_flag = True
for ii, name in p_bar:
file_name = os.path.join(path_to_subfolder, name)
if not is_valid_file(file_name):
continue
metric_df = pd.read_csv(file_name, index_col=False)
metric_df['experiment_id'] = name.split("_")[0]
if new_csv_flag:
metric_df.to_csv(path_to_metric_df, index=False)
metric_df_columns = list(metric_df.columns.values)
new_csv_flag = False
else:
metric_df.reindex(columns=metric_df_columns).to_csv(
path_to_metric_df, mode='a', header=False, index=False)
logger.info("Metric Data Aggregated to {0}".format(path_to_metric_df))
return
## Make Quick Plots
def do_make_plots(experiment_folder):
""" Make quick plots based on aggregated.csv output of `do_process_out` """
path_to_processed = os.path.join(experiment_folder, 'processed')
path_to_fig = os.path.join(experiment_folder, 'fig', 'processed')
make_path(path_to_fig)
logger.info("Loading Data")
aggregated_df = pd.read_csv(
os.path.join(path_to_processed, 'aggregated.csv'))
options_df = pd.read_csv(
os.path.join(path_to_processed, 'options.csv'))
evaltargets = aggregated_df['eval_set'].unique()
logger.info("Making Plots for {0}".format(evaltargets))
for evaltarget in evaltargets:
logger.info("Processing Data for {0}".format(evaltarget))
sub_df = pd.merge(
aggregated_df[aggregated_df['eval_set'] == evaltarget],
options_df[['method_name', 'experiment_id']],
on='experiment_id',
)
sub_df['variable_metric'] = sub_df['variable'] + '_' + sub_df['metric']
logger.info("Plotting metrics vs time for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='time', y='value', hue='method_name', kind='line',
col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df,
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_time.png'.format(evaltarget)))
logger.info("Plotting metrics vs iteration for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='iteration', y='value', hue='method_name',
kind='line', col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df,
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_iteration.png'.format(evaltarget)))
## After Burnin
if sub_df.query('iteration > 100').shape[0] > 0:
logger.info("Plotting metrics vs time after burnin for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='time', y='value', hue='method_name', kind='line',
col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df.query('iteration > 100'),
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_time_burnin.png'.format(evaltarget)))
logger.info("Plotting metrics vs iteration for {0}".format(evaltarget))
plt.close('all')
g = sns.relplot(x='iteration', y='value', hue='method_name',
kind='line', col='variable_metric', col_wrap=3,
estimator=None, units='experiment_id',
data=sub_df.query('iteration > 100'),
facet_kws=dict(sharey=False),
)
g.fig.set_size_inches(12, 10)
g.savefig(os.path.join(path_to_fig,
'{0}_metric_vs_iteration_burnin.png'.format(evaltarget)))
return
## Evaluate Parameter Sample Quality
def do_eval_ksd(
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
ksd_burnin=0.33, ksd_subsequence_length=1000, ksd_buffer_length=10,
**kwargs):
""" Evaluate the Kernelized Stein Divergence
Pseudocode:
Load Train Data + Setup Sampler
Load Parameter Trace for Experiment Id (apply burnin)
For each parameter, calculate the gradient of the logjoint
(if using noisy gradients, take average over multiple replications)
Compute KSD at each checkpoint
Checkpoints results to out/eval_ksd for each experiment_id
"""
    max_eval_time = max(max_eval_time, 8*60*60)  # allow at least 8 hours for KSD
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
from sgmcmc_ssm.trace_metric_functions import compute_KSD
GRAD_DIM = 3
GRAD_VARIABLES = ['phi', 'sigma', 'tau']
logger.info("Beginning KSD Evaluation of {0} id:{1}".format(
experiment_name, experiment_id,
))
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"trace_eval_ksd")
# path_to_fig = os.path.join(experiment_folder, "fig",
# "trace_eval_ksd","{0:0>4}".format(experiment_id))
path_to_scratch = os.path.join(experiment_folder, 'scratch')
path_to_checkpoint_state = os.path.join(path_to_scratch,
"trace_eval_ksd_{0:0>4}_state.p".format(experiment_id))
make_path(path_to_out)
# make_path(path_to_fig)
make_path(path_to_scratch)
# Load Train Data + Setup Sampler
Sampler, Prior = get_model_sampler_prior(model_type)
path_to_data = kwargs['path_to_data']
logger.info("Getting Data at {0}".format(path_to_data))
data = joblib.load(path_to_data)
logger.info("Setting up Sampler")
path_to_init = kwargs['path_to_init']
init_parameters = joblib.load(path_to_init)
sampler = Sampler(
name=experiment_id,
**init_parameters.dim
)
observations = data['observations']
sampler.setup(
observations=observations,
prior=Prior.generate_default_prior(
var=prior_variance,
**init_parameters.dim
),
)
if not os.path.isfile(path_to_checkpoint_state):
# Load parameter_list
logger.info("Getting Params from {0}".format(path_to_parameters_list))
parameters_list = joblib.load(path_to_parameters_list).copy()
# Apply Burnin
parameters_list = parameters_list.iloc[int(parameters_list.shape[0]*ksd_burnin):]
parameters_list['num_ksd_eval'] = 0.0
parameters_list['grad'] = [np.zeros(GRAD_DIM) for _ in range(parameters_list.shape[0])]
metrics_df = pd.DataFrame()
cur_param_index = 0
logger.info("Calculating KSD on {0} parameters".format(
parameters_list.shape[0]))
else:
# Load metrics_df + parameter_list from checkpoint
logger.info("Loading parameters from previous checkpoint")
checkpoint_state = joblib.load(path_to_checkpoint_state)
parameters_list = checkpoint_state['parameters_list']
metrics_df = checkpoint_state['metrics_df']
cur_param_index = checkpoint_state['cur_param_index']
logger.info("Found {0} parameters with at least {1} evals".format(
parameters_list.shape[0], parameters_list['num_ksd_eval'].min()))
# Terminate after 1 pass if exact KSD
if (ksd_subsequence_length == -1) or \
(ksd_subsequence_length >= data['observations'].shape[0]):
if (cur_param_index == 0) and \
(parameters_list['num_ksd_eval'].min() >= 1):
logger.info("Already computed exact KSD")
return metrics_df
max_iterations = max_eval_iterations*parameters_list.shape[0]
start_time = time.time()
max_time_exceeded = False
last_checkpoint_time = time.time()
p_bar = tqdm(range(max_iterations), file=tqdm_out, mininterval=300)
for ii in p_bar:
if (time.time() - start_time > max_eval_time):
logger.info("Max Time Elapsed: {0} > {1}".format(
time.time() - start_time, max_eval_time))
max_time_exceeded = True
parameters = parameters_list['parameters'].iloc[cur_param_index]
sampler.parameters = parameters
grad = convert_gradient(
gradient = sampler.noisy_gradient(
kind="pf", pf='poyiadjis_N', N=10000,
subsequence_length=ksd_subsequence_length,
buffer_length=ksd_buffer_length,
is_scaled=False,
# tqdm=functools.partial(tqdm,
# file=tqdm_out, mininterval=60),
),
parameters=parameters,
)
index = parameters_list.index[cur_param_index]
parameters_list.at[index,'grad'] += grad
parameters_list.at[index,'num_ksd_eval'] += 1.0
# Update parameter index for next loop
cur_param_index += 1
if cur_param_index == parameters_list.shape[0]:
logger.info("Completed {0} passes over all parameters".format(
parameters_list['num_ksd_eval'].min()))
cur_param_index = 0
# Checkpoint Results
if ((time.time() - last_checkpoint_time > checkpoint_time) or
(cur_param_index == 0) or (ii+1 == max_eval_iterations) or
max_time_exceeded):
# Compute KSD
sub_list = parameters_list[parameters_list['num_ksd_eval'] > 0]
param_list = sub_list['parameters']
grad_list = sub_list['grad'] / sub_list['num_ksd_eval']
result_dict = compute_KSD(
param_list=param_list.tolist(), grad_list=grad_list.tolist(),
variables=GRAD_VARIABLES,
max_block_size=512, # Block Size for computing kernel
)
new_metric_df = pd.DataFrame([
dict(metric='ksd', variable=key, value=value,
num_samples = cur_param_index-1,
num_evals = parameters_list['num_ksd_eval'].min(),
) for key, value in result_dict.items()
])
metrics_df = pd.concat([metrics_df, new_metric_df],
ignore_index=True, sort=True)
# Save Metrics DF to CSV
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Saving KSD metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(metrics_df, path_to_metrics_file, index=False)
# Checkpoint State
logger.info("Saving checkpoint to {0}".format(
path_to_checkpoint_state))
joblib_write_to_file(dict(
parameters_list=parameters_list,
metrics_df=metrics_df,
cur_param_index=cur_param_index,
), path_to_checkpoint_state)
# Reset Checkpoint Clock
last_checkpoint_time = time.time()
# # Terminate after 1 pass if exact KSD # Not possible when using PF
# if (ksd_subsequence_length == -1) or \
# (ksd_subsequence_length >= data['observations'].shape[0]):
# if cur_param_index == 0:
# break
# Terminate if max_time_exceeded
if max_time_exceeded:
break
return metrics_df
def do_eval_ks_test(
experiment_name, experiment_id,
experiment_folder,
model_type, prior_variance,
max_eval_iterations, max_eval_time,
checkpoint_num_iters, checkpoint_time,
kstest_burnin=0.33, kstest_variables=None,
path_to_reference_parameter_list=None,
**kwargs):
""" Evaluate KS-Test statistic
KS-Test between
experiment_id trace (after burnin) and
reference_parameter_list
Args:
kstest_burnin (double): fraction of samples to discard as burnin
path_to_reference_parameter_list (path): path to reference_parameter_list
(loads using joblib),
if None, then uses all Gibbs samples (after burnin)
Pseudocode:
Load Reference Parameter Trace
Load Parameter Trace for Experiment Id (apply burnin)
        For each variable, run a two-sample KS test between the two traces
            (and plot the two empirical densities side by side)
        Save metrics to out/trace_eval_kstest for each experiment_id
"""
from scipy.stats import ks_2samp
if kstest_variables is None:
kstest_variables = ['phi', 'sigma', 'tau']
logger.info("Beginning KS Test Evaluation of {0} id:{1}".format(
experiment_name, experiment_id,
))
# Paths
path_to_parameters_list = os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(experiment_id))
path_to_out = os.path.join(experiment_folder, "out",
"trace_eval_kstest")
path_to_fig = os.path.join(experiment_folder, "fig",
"trace_eval_kstest")
make_path(path_to_out)
make_path(path_to_fig)
# Load Reference Parameter Trace
if path_to_reference_parameter_list is None:
# Check options for path to Gibbs parameter traces
path_to_options = os.path.join(experiment_folder, 'in', 'options.p')
options_df = pd.DataFrame(joblib.load(path_to_options))
sub_df = options_df[options_df['path_to_data'] == kwargs['path_to_data']]
gibbs_options = sub_df[sub_df['inference_method'] == "Gibbs"]
if gibbs_options.empty:
logger.warning("No Gibbs / Ground Truth examples found. Skipping KS Test")
return
path_to_traces = [
os.path.join(experiment_folder, "out", "fit",
"{0}_parameters.p".format(row['experiment_id']))
for _, row in gibbs_options.iterrows()
]
reference_parameters_list = []
for path_to_trace in path_to_traces:
ref_param_list = joblib.load(path_to_trace)[['parameters']]
ref_param_list = ref_param_list.iloc[
int(ref_param_list.shape[0]*kstest_burnin):]
reference_parameters_list.append(ref_param_list)
reference_parameters = pd.concat(reference_parameters_list,
ignore_index=True, sort=True)
# Load Experiment ID Parameter Trace
logger.info("Getting Params from {0}".format(path_to_parameters_list))
    parameters_list = joblib.load(path_to_parameters_list)
    # Apply Burnin
    parameters_list = parameters_list.iloc[
            int(parameters_list.shape[0]*kstest_burnin):]
    parameters_list = parameters_list[['parameters']]
# Calculate KSTest for each variable
metrics_df = pd.DataFrame()
logger.info("Calculating KS-Test on {0} parameters".format(
parameters_list.shape[0]))
results = []
    plt.close('all')
    fig, axes = plt.subplots(1, len(kstest_variables), sharey=False)
    axes = np.atleast_1d(axes)  # guard the single-variable case
for ii, variable in enumerate(kstest_variables):
data_ref = np.array([getattr(param, variable)
for param in reference_parameters['parameters']]).flatten()
data_samp = np.array([getattr(param, variable)
for param in parameters_list['parameters']]).flatten()
statistic, pvalue = ks_2samp(data_samp, data_ref)
results.append(dict(metric='kstest', variable=variable,
value=statistic))
results.append(dict(metric='kstest_pvalue', variable=variable,
value=pvalue))
sns.distplot(data_ref, ax=axes[ii], label='ref')
sns.distplot(data_samp, ax=axes[ii], label='samp')
if pvalue < 0.05:
axes[ii].set_title('{0}\n KS-value: {1:1.2e} ({2:1.2e}*)'.format(
variable, statistic, pvalue))
else:
axes[ii].set_title('{0}\n KS-value: {1:1.2e} ({2:1.2e})'.format(
variable, statistic, pvalue))
axes[-1].legend()
fig.set_size_inches(4*len(kstest_variables), 7)
fig.savefig(os.path.join(path_to_fig, "{0}_trace_density.png".format(
experiment_id)))
results.append(dict(metric='num_samples', variable="trace",
value=parameters_list.shape[0]))
metrics_df = pd.DataFrame(results)
# Save Metrics DF to CSV
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Metrics:\n{0}".format(metrics_df))
logger.info("Saving KSTest metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(metrics_df, path_to_metrics_file, index=False)
return metrics_df
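# Minimal illustration (added) of the two-sample KS test used above. The
# arrays are synthetic, but the (statistic, pvalue) return convention is
# exactly what do_eval_ks_test unpacks:
#   from scipy.stats import ks_2samp
#   rng = np.random.RandomState(0)
#   stat, pval = ks_2samp(rng.normal(size=500), rng.normal(size=500))
#   # small statistic / large pvalue -> consistent with a shared distribution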
###############################################################################
## Experiment Specific Functions
###############################################################################
def setup_train_test_data(experiment_folder, experiment_name, T, T_test,
parameter_list, data_reps, init_methods, path_to_existing=None,
**kwargs):
""" Setup Synthetic Data """
# Setup Input Folder
path_to_input = os.path.join(experiment_folder, "in")
if not os.path.isdir(path_to_input):
os.makedirs(path_to_input)
# Generate Training + Test Data
if path_to_existing is None:
logger.info("Generating Training Data + Inits")
else:
logger.info("Copying Training Data + Inits from {}".format(
path_to_existing))
input_args = []
# Create + Save Test Data (shared among training sets)
for param_num, (param_name, parameters) in enumerate(parameter_list.items()):
test_data_name = "test_data"
path_to_test_data = os.path.join(path_to_input,
"{0}.p".format(test_data_name))
if path_to_existing is None:
test_data = generate_svm_data(T=T_test, parameters=parameters)
else:
path_to_existing_test_data = os.path.join(path_to_existing,
"{0}.p".format(test_data_name))
test_data = joblib.load(path_to_existing_test_data)
joblib.dump(test_data, path_to_test_data)
for data_rep in range(data_reps):
# Create + Save Training Data
data_name = "train_data_{0}".format(data_rep+data_reps*param_num)
path_to_data = os.path.join(path_to_input,
"{0}.p".format(data_name))
if path_to_existing is None:
train_data = generate_svm_data(T=T, parameters=parameters)
else:
path_to_existing_data = os.path.join(path_to_existing,
"{0}.p".format(data_name))
train_data = joblib.load(path_to_existing_data)
joblib.dump(train_data, path_to_data)
# Generate Inits
for init_num, init_method in enumerate(init_methods):
# Create + Save Init
path_to_init = os.path.join(path_to_input,
"{0}_init_{1}.p".format(data_name, init_num))
if path_to_existing is None:
logger.info("Generating Init {0} of {1}".format(
init_num, len(init_methods)))
setup_init(
data=train_data,
init_method=init_method,
path_to_init=path_to_init,
)
else:
logger.info("Copying Init {0} of {1}".format(
init_num, len(init_methods)))
path_to_existing_init = os.path.join(path_to_existing,
"{0}_init_{1}.p".format(data_name, init_num))
init_parameters = joblib.load(path_to_existing_init)
joblib.dump(init_parameters, path_to_init)
input_args.append({
'experiment_name': experiment_name,
'path_to_data': path_to_data,
'path_to_test_data': path_to_test_data,
'path_to_init': path_to_init,
'param_name': param_name,
'init_method': init_method,
})
return input_args
def setup_init(data, init_method, path_to_init, n=1, m=1):
""" Setup Init Parameters for data """
if init_method == "prior":
prior = SVMPrior.generate_default_prior(n=n, m=m)
sampler = SVMSampler(n=n, m=m)
sampler.setup(observations=data['observations'],
prior=prior)
sampler.project_parameters()
init_parameters = sampler.parameters
elif init_method == "truth":
init_parameters = data['parameters']
else:
raise ValueError("Unrecognized init_method")
joblib.dump(init_parameters, path_to_init)
return init_parameters
def setup_options(experiment_folder, arg_list):
# Create Options csv in <experiment_folder>/in
path_to_input = os.path.join(experiment_folder, "in")
if not os.path.isdir(path_to_input):
os.makedirs(path_to_input)
# Sort Arg List by Data x Init Trial
arg_list = sorted(arg_list,
key = lambda k: (k['path_to_data'], k['path_to_init']))
# Assign Experiment ID + Experiment Folder Location
for ii, custom_dict in enumerate(arg_list):
# Set Defaults
arg_dict = DEFAULT_OPTIONS.copy()
arg_dict.update(custom_dict)
arg_dict["experiment_id"] = ii
arg_dict["experiment_folder"] = experiment_folder
arg_list[ii] = arg_dict
path_to_arg_list = os.path.join(path_to_input, "options.p")
logger.info("Saving arg_list as {0}".format(path_to_arg_list))
joblib.dump(arg_list, path_to_arg_list)
options_df = pd.DataFrame(arg_list)
path_to_options_file = os.path.join(path_to_input,"options.csv")
logger.info("Also saving as csv at {0}".format(path_to_options_file))
options_df.to_csv(path_to_options_file, index=False)
return options_df
def get_model_sampler_prior(model_type):
if model_type == "SVM":
Sampler = SVMSampler
Prior = SVMPrior
else:
raise NotImplementedError()
return Sampler, Prior
def get_model_sampler_step(
model_type, inference_method, steps_per_iteration,
epsilon, minibatch_size, subsequence_length, buffer_length,
**kwargs):
""" Returns sampler_func_names + sampler_func_kwargs for SamplerEvaluator"""
step_kwargs = dict(
epsilon = epsilon,
minibatch_size = minibatch_size,
subsequence_length = subsequence_length,
buffer_length = buffer_length,
kind = kwargs.get("kind", "marginal"),
num_samples = kwargs.get("num_samples", None),
**kwargs.get("pf_kwargs", {})
)
if inference_method in ['SGRD', 'SGRLD']:
if 'preconditioner' not in step_kwargs.keys():
            raise NotImplementedError(
                    "SGRD/SGRLD require a 'preconditioner' in their kwargs")
# step_kwargs['preconditioner'] = LGSSMPreconditioner()
if inference_method == 'SGD':
sampler_func_names = ['step_sgd', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'ADAGRAD':
sampler_func_names = ['step_adagrad', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGRD':
sampler_func_names = ['step_sgd', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGLD':
sampler_func_names = ['sample_sgld', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
elif inference_method == 'SGRLD':
sampler_func_names = ['sample_sgrld', 'project_parameters']
sampler_func_kwargs = [step_kwargs, {}]
    elif inference_method == 'Gibbs':
        sampler_func_names = ["sample_gibbs", "project_parameters"]
        sampler_func_kwargs = [{}, {}]
    else:
        raise ValueError(
                "Unrecognized inference_method '{0}'".format(inference_method))
sampler_func_names = sampler_func_names * steps_per_iteration
sampler_func_kwargs = sampler_func_kwargs * steps_per_iteration
return sampler_func_names, sampler_func_kwargs
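# Hedged sketch (added): do_fit above consumes the return values as
#   names, kwargs_list = get_model_sampler_step(
#       model_type='SVM', inference_method='Gibbs', steps_per_iteration=1,
#       epsilon=None, minibatch_size=None, subsequence_length=None,
#       buffer_length=None)
#   # evaluator.evaluate_sampler_step(names, kwargs_list, evaluate=True)
# For Gibbs the step kwargs are ignored, so the None placeholders are safe.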
###############################################################################
## Helper / Utility Functions
def dict_product(*args):
    """ Cartesian product over lists of dicts, merging each combination
    into a single dict """
    from itertools import product
    return [{k: v for d in L for k, v in d.items()} for L in product(*args)]
def update_dict(ldict, rdict):
""" Update ldict with key, value pairs from rdict """
updated_dict = ldict.copy()
updated_dict.update(rdict)
return updated_dict
def is_valid_file(filename):
    # Check filename exists + is not empty
    if not os.path.isfile(filename):
        logging.info("Missing File {0}".format(filename))
        return False
    elif os.path.getsize(filename) <= 1:
        # File is currently being written
        logging.info("Pausing for 5.0 sec for {0}".format(filename))
        time.sleep(5.0)
        if os.path.getsize(filename) <= 1:
            logging.info("== EMPTY File {0} ==".format(filename))
            return False
        # File filled in while we waited
        return True
    else:
        return True
def process_checkpoint(evaluator, data, experiment_id,
path_to_out, path_to_fig,
checkpoint_num=0, parameters_list=None,
**kwargs):
""" Save Checkpoint """
# Save Metrics
path_to_metrics_file = os.path.join(path_to_out,
"{0}_metrics.csv".format(experiment_id))
logger.info("Saving Metrics to {0}".format(path_to_metrics_file))
pandas_write_df_to_csv(df=evaluator.get_metrics(),
filename=path_to_metrics_file, index=False)
if parameters_list is not None:
path_to_parameters_list = os.path.join(path_to_out,
"{0}_parameters.p".format(experiment_id))
logger.info("Saving Parameters List to {0}".format(
path_to_parameters_list))
joblib_write_to_file(pd.DataFrame(parameters_list),
filename=path_to_parameters_list)
if len(evaluator.metric_functions) > 0 and evaluator.metrics.shape[0] > 0:
path_to_metrics_plot = os.path.join(path_to_fig, "metrics.png")
logger.info("Plotting Metrics to {0}".format(path_to_metrics_plot))
plt.close('all')
g = plot_metrics(evaluator)
g.fig.set_size_inches(12,10)
g.savefig(path_to_metrics_plot)
if len(evaluator.metrics['iteration'].unique()) > 10:
path_to_zoom_metrics_plot = \
os.path.join(path_to_fig, "metrics_zoom.png")
logger.info("Plotting Zoom Metrics to {0}".format(
path_to_zoom_metrics_plot))
plt.close('all')
g = plot_metrics(evaluator, full_trace=False)
g.fig.set_size_inches(12,10)
g.savefig(path_to_zoom_metrics_plot)
if len(evaluator.sample_functions) > 0 and evaluator.samples.shape[0] > 0:
path_to_trace_plot = os.path.join(path_to_fig, "trace.png")
logger.info("Plotting Sample Trace to {0}".format(path_to_trace_plot))
plt.close('all')
fig, axes = plot_trace_plot(evaluator)
fig.set_size_inches(12,10)
fig.savefig(path_to_trace_plot)
if len(evaluator.samples['iteration'].unique()) > 10:
path_to_zoom_trace_plot = \
os.path.join(path_to_fig, "trace_zoom.png")
logger.info("Plotting Zoom Trace to {0}".format(
path_to_zoom_trace_plot))
plt.close('all')
fig, axes = plot_trace_plot(evaluator, full_trace=False)
fig.set_size_inches(12,10)
fig.savefig(path_to_zoom_trace_plot)
return
def convert_gradient(gradient, parameters):
    """ Convert gradient w.r.t. LRinv, LQinv, C, A to gradient w.r.t. phi, sigma, tau """
    new_gradient = np.array([
        gradient['A'], # grad w.r.t. A <-> grad w.r.t. phi
        # .item() replaces the deprecated np.asscalar
        gradient['LQinv_vec']*(-np.asarray(parameters.LQinv).item()**-1), # grad w.r.t. sigma
        gradient['LRinv_vec']*(-np.asarray(parameters.LRinv).item()**-1), # grad w.r.t. tau
        ]).flatten()
    return new_gradient
###############################################################################
## Run Script ---------------------------------------------------------------
###############################################################################
if __name__ == '__main__':
parser = construct_parser()
logging.info("Parsing Args")
args, extra = parser.parse_known_args()
logging.info("Args: %s", str(args))
if extra:
logging.warning("Unused Arguments: {0}".format(extra))
out = main(**vars(args))
logging.info("..Complete")
# EOF
| 59,557 | 38.918231 | 104 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/svm/demo_setup.py | """
Create Setup Script for Demo Experiment
Usage:
0. Change `project_root` in this file to point to the `sgmcmc_ssm_code` directory
1. Run this script, which will create a `setup.sh` script at `<experiment_folder>`
(default is `./scratch/<experiment_name>/scripts/setup.sh`)
This will generate the train + test data, initializations + other scripts
2. Run the `fit.sh` script to fit the models specified in this file
generates output to `<experiment_folder>/out/fit`
3. Run `eval_train.sh` or `eval_test.sh` to evaluate the fits on the train or test data
4. Run `process_out.sh` to aggregate the results to `<experiment_folder>/processed/`
generates csv files that can be used to make figures
the main two csv files of interest are:
"aggregated.csv" and "options.csv" which can be joined together on experiment_id
"""
# Standard Imports
import numpy as np
import pandas as pd
import os
import sys
import joblib
from sklearn.model_selection import ParameterGrid
import logging
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
## Set Experiment Name
experiment_name = "svm_demo"
## Filesystem Paths
conda_env_name = None
project_root = './' # Must be specified (path to "/sgmcmc_ssm_code/")
os.chdir(project_root)
sys.path.append(os.getcwd()) # Fix Python Path
# Paths relative to project root
current_folder = os.path.join("nonlinear_ssm_pf_experiment_scripts", "svm")
python_script_path = os.path.join(current_folder,"driver.py")
experiment_folder = os.path.join("scratch", experiment_name) # Path to output
# Synthetic Data Args
data_reps = 1 # Number of training sets
T = 1000 # Training set size
T_test = 1000 # Test set size
init_methods = ['prior', 'truth'] * 1 # number of initializations + how they are initialized
# SVM parameters
from sgmcmc_ssm.models.svm import (
SVMParameters,
)
param_name = 'A=0.95,Q=0.5,R=0.5'
A = np.eye(1)*0.95
Q = np.eye(1)*0.5
R = np.eye(1)*0.5
LQinv = np.linalg.cholesky(np.linalg.inv(Q))
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = SVMParameters(A=A, LQinv=LQinv, LRinv=LRinv)
parameters.project_parameters()
parameter_list = {param_name: parameters}
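# Optional sanity check (added; kept commented out so this setup script stays
# side-effect free). generate_svm_data is the same helper the driver uses:
#   from sgmcmc_ssm.models.svm import generate_svm_data
#   _demo = generate_svm_data(T=100, parameters=parameters)
#   # _demo['observations'] should contain T=100 observations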
# Sampler Args
common_sampler_args = {
'inference_method': ['SGLD'],
'subsequence_length': [40],
'buffer_length': [10],
'minibatch_size': [1],
'steps_per_iteration': [10],
'max_num_iters': [10000],
'max_time': [300],
'epsilon': [0.1],
}
sampler_args = [
{
'method_name': ['POYIADJIS_N_1000'],
'kind': ['pf'],
'pf_kwargs': [dict(pf='poyiadjis_N', N=1000)],
**common_sampler_args
},
{
'method_name': ['NEMETH_1000'],
'kind': ['pf'],
'pf_kwargs': [dict(pf='nemeth', N=1000, lambduh=0.95)],
**common_sampler_args
},
{
'method_name': ['PARIS_100'],
'kind': ['pf'],
'pf_kwargs': [dict(pf='paris', N=100, Ntilde=2)],
**common_sampler_args
},
# {
# 'method_name': ['POYIADJIS_N2_100'],
# 'kind': ['pf'],
# 'pf_kwargs': [dict(pf='poyiadjis_N2', N=100)],
# **common_sampler_args
# },
]
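# The three pf_kwargs entries select different particle smoothing back-ends
# (names assumed to follow the literature): Poyiadjis et al.'s O(N) estimator,
# Nemeth et al.'s shrinkage estimator (shrinkage weight `lambduh`), and the
# PaRIS algorithm with `Ntilde` backward draws per particle. The O(N^2)
# Poyiadjis variant is left commented out above.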
# Script Kwargs (only really matters when using cluster)
setup_script_kwargs=dict(deploy_target='desktop')
fit_script_kwargs=dict(deploy_target='desktop')
eval_script_kwargs=dict(deploy_target='desktop')
process_out_script_kwargs=dict(deploy_target='desktop')
make_plots_script_kwargs=dict(deploy_target='desktop')
############################################################################
## MAIN SCRIPT
############################################################################
from sgmcmc_ssm.driver_utils import (
script_builder
)
if __name__ == "__main__":
# Setup Folders
logging.info("Creating Folder for {0}".format(experiment_name))
path_to_shell_script = os.path.join(experiment_folder, "scripts")
if not os.path.isdir(experiment_folder):
os.makedirs(experiment_folder)
if not os.path.isdir(path_to_shell_script):
os.makedirs(path_to_shell_script)
# Create Additional Args
path_to_additional_args = os.path.join(experiment_folder,
"setup_additional_args.p")
sampler_args = [arg
for args in sampler_args
for arg in list(ParameterGrid(args))
]
additional_args = dict(
sampler_args=sampler_args,
python_script_path=python_script_path,
path_to_shell_script=path_to_shell_script,
project_root=project_root,
experiment_name=experiment_name,
T=T,
T_test=T_test,
parameter_list=parameter_list,
data_reps=data_reps,
init_methods=init_methods,
conda_env_name=conda_env_name,
fit_script_kwargs=fit_script_kwargs,
eval_script_kwargs=eval_script_kwargs,
process_out_script_kwargs=process_out_script_kwargs,
make_plots_script_kwargs=make_plots_script_kwargs,
)
joblib.dump(additional_args, path_to_additional_args)
# Create Setup Script
bash_file_masters = script_builder(
script_name="setup",
python_script_path=python_script_path,
python_script_args=[{
"--experiment_folder": experiment_folder,
"--path_to_additional_args": path_to_additional_args,
"--setup": None,
}],
path_to_shell_script=path_to_shell_script,
project_root=project_root,
conda_env_name=conda_env_name,
**setup_script_kwargs,
)
logging.info("Run {0} to complete settting up {1}".format(
bash_file_masters[0], experiment_name))
# EOF
| 5,924 | 32.100559 | 91 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/gradient_error_fig_scripts/svm_grad_compare.py | #!/usr/bin/python3
# This script tests the particle-filter gradient approximations (full data).
#######IMPORT RELEVANT MODULES######################
import numpy as np
import pandas as pd
import time
import joblib
import os
from sgmcmc_ssm.models.svm import (
    SVMParameters,
    SVMSampler,
    SVMPrior,
    generate_svm_data,
    SVMHelper,
)
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
### Main Function
def make_plots(T, L, N_reps, N_trials, pars, buffer_sizes, path_to_out, seed=12345, save_dat=True):
print("\n===========================================================")
print("T = {0}, L = {4}, N_reps = {1}, N_trial={5}, pars = {2}, buffer_sizes = {3}".format(
T, N_reps, pars, buffer_sizes, L, N_trials))
print("===========================================================\n")
np.random.seed(seed)
# Generate Data
A = np.eye(1) * pars[0]
Q = np.eye(1) * pars[1]
R = np.eye(1) * pars[2]
LQinv = np.linalg.cholesky(np.linalg.inv(Q))
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = SVMParameters(A=A, LQinv=LQinv, LRinv=LRinv)
def convert_gradient(grad_dict):
return [
grad_dict['A'],
grad_dict['LQinv_vec'],
grad_dict['LRinv_vec'],
]
results_dfs = []
for trial in tqdm(range(N_trials), desc="Trial"):
data = generate_svm_data(T=T, parameters=parameters, tqdm=tqdm)
t0 = (T+L)//2
observations = data['observations']
helper = SVMHelper(forward_message=data['initial_message'],
**parameters.dim)
# Compute Exact (Full Buffered Gradient)
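        # The reference gradient below is itself a Monte Carlo estimate: the
        # average of 10 independent Poyiadjis O(N) particle-filter runs with
        # N = 10^6 particles on the fully buffered subsequence.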
start_time = time.time()
full_buffer_gradients = [None]*10
pbar = tqdm(range(10))
pbar.set_description('Number of Reps')
buffer_size = L
pf_kwargs = dict(
observations=observations[t0-buffer_size:t0+L+buffer_size],
parameters=parameters,
kernel=None,
subsequence_start = buffer_size,
subsequence_end = L+buffer_size,
pf='poyiadjis_N',
N=1000000,
tqdm=tqdm,
)
for rep in pbar:
full_buffer_gradients[rep] = convert_gradient(
helper.pf_gradient_estimate(
**pf_kwargs,
))
full_buffer_gradient = np.mean(full_buffer_gradients, axis=0)
full_buffer_gradient_sd = np.std(full_buffer_gradients, axis=0)
print(full_buffer_gradient)
print(full_buffer_gradient_sd)
full_buffer_time = time.time() - start_time
estimates_bs = [dict(
poyiadjis_100=[], poyiadjis_1000=[], poyiadjis_10000=[])
for _ in range(len(buffer_sizes))]
runtimes_bs = [{key:[] for key in estimates_bs[0].keys()}
for _ in range(len(buffer_sizes))]
pbar_bs = tqdm(zip(buffer_sizes, estimates_bs, runtimes_bs),
desc="buffer size",
total=len(buffer_sizes))
for buffer_size, estimates, runtimes in pbar_bs:
pf_kwargs = dict(
observations=observations[t0-buffer_size:t0+L+buffer_size],
parameters=parameters,
kernel=None,
subsequence_start = buffer_size,
subsequence_end = L+buffer_size,
tqdm=tqdm,
)
pbar = tqdm(range(N_reps))
pbar.set_description('Number of Reps')
for rep in pbar:
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=100, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_score_estimate(**pf_kwargs))
estimates['poyiadjis_100'].append(poy_estimate)
runtimes['poyiadjis_100'].append(time.time() - start_time)
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=1000, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_score_estimate(**pf_kwargs))
estimates['poyiadjis_1000'].append(poy_estimate)
runtimes['poyiadjis_1000'].append(time.time() - start_time)
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=10000, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_score_estimate(**pf_kwargs))
estimates['poyiadjis_10000'].append(poy_estimate)
runtimes['poyiadjis_10000'].append(time.time() - start_time)
dfs = []
variables = ['A', 'LQinv_vec', 'LRinv_vec']
for buffer_size, estimates, runtimes in zip(buffer_sizes, estimates_bs, runtimes_bs):
for key, value in estimates.items():
df = pd.DataFrame(np.array(value), columns=variables)
df.index.name = 'rep'
df = df.reset_index()
df['runtime'] = runtimes[key]
df = df.melt(id_vars='rep')
df['buffer_size'] = buffer_size
df['sampler'] = key
dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
# Checkpoint
if not os.path.isdir(os.path.join(path_to_out, 'trial')):
os.makedirs(os.path.join(path_to_out, 'trial'))
joblib.dump(df, os.path.join(path_to_out, 'trial',
'dat{0}_joblib.gz'.format(trial)))
# Append Results
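        # For each estimator/buffer combination, summarize the gradient error
        # against the reference: MSE decomposes exactly as bias^2 + variance
        # (up to Monte Carlo noise in the reference itself).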
for ii, variable in enumerate(variables):
true_grad = full_buffer_gradient[ii]
var_df = df[df['variable'] == variable]
runtime_df = df[df['variable'] == 'runtime']
for (sampler, buffer_size), sub_df in var_df.groupby(
['sampler', 'buffer_size']):
result_df = pd.DataFrame([dict(
sampler=sampler,
buffer_size=buffer_size,
trial=trial,
variable=variable,
mse=np.mean((sub_df['value'] - true_grad)**2),
bias_sq=(np.mean(sub_df['value']) - true_grad)**2,
var=np.var(sub_df['value']),
mean_runtime=np.mean(
runtime_df.query('sampler == @sampler & buffer_size == @buffer_size')['value'])
)])
results_dfs.append(result_df)
# Checkpoint Results
total_result_df = pd.concat(results_dfs, ignore_index=True)
joblib.dump(total_result_df,
os.path.join(path_to_out, 'summary_dat_joblib.gz'))
if trial % 10 == 0:
for variable in variables:
plt.close('all')
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='mse', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient MSE")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_mse.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logmse.png".format(variable)))
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='bias_sq', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient Bias Squared")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_bias.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logbias.png".format(variable)))
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='var', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient Variance")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_var.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logvar.png".format(variable)))
plt.close('all')
### Script
if __name__ == "__main__":
N_reps = 100 #number of repetitions
N_trials = 100
buffer_sizes = np.array([8, 6, 4, 2, 1, 0])
A = 0.95
Q = 0.5
R = 0.5
# Set 1
T = 100 #length of series
L = 16
pars = np.array((A, Q, R))
path_to_out = os.path.join(
"./scratch/svm_grad_compare/",
"{0}".format(tuple(pars)))
make_plots(T, L, N_reps, N_trials, pars, buffer_sizes, path_to_out)
| 8,962 | 37.969565 | 107 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/gradient_error_fig_scripts/garch_grad_compare.py | #!/usr/bin/python3
# This script tests the particle-filter gradient approximations (full data).
#######IMPORT RELEVANT MODULES######################
import numpy as np
import pandas as pd
import time
import joblib
import os
from sgmcmc_ssm.models.garch import (
GARCHParameters,
GARCHSampler,
GARCHPrior,
generate_garch_data,
GARCHHelper,
)
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
### Main Function
def make_plots(T, L, N_reps, N_trials, pars, buffer_sizes, path_to_out, seed=12345, save_dat=True):
print("\n===========================================================")
print("T = {0}, L = {4}, N_reps = {1}, N_trial={5}, pars = {2}, buffer_sizes = {3}".format(
T, N_reps, pars, buffer_sizes, L, N_trials))
print("===========================================================\n")
np.random.seed(seed)
# Generate Data
alpha, beta, gamma, tau = pars[0], pars[1], pars[2], pars[3]
R = np.eye(1)*tau**2
log_mu, logit_phi, logit_lambduh = \
GARCHParameters.convert_alpha_beta_gamma(alpha, beta, gamma)
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = GARCHParameters(
log_mu=log_mu,
logit_phi=logit_phi,
logit_lambduh=logit_lambduh,
LRinv=LRinv,
)
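    # GARCH(1,1)-style state-space model (parameterization assumed from
    # sgmcmc_ssm.models.garch; treat this as a sketch):
    #   sigma_t^2 = alpha + beta * x_{t-1}^2 + gamma * sigma_{t-1}^2
    #   x_t ~ N(0, sigma_t^2),   y_t = x_t + tau * e_t,   e_t ~ N(0, 1)
    # (alpha, beta, gamma) are mapped to the unconstrained (log_mu, logit_phi,
    # logit_lambduh) by convert_alpha_beta_gamma above.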
parameters_dict = parameters.as_dict().copy()
def convert_gradient(grad_dict):
return [
grad_dict['log_mu'],
grad_dict['logit_phi'],
grad_dict['logit_lambduh'],
grad_dict['LRinv_vec'],
]
results_dfs = []
for trial in tqdm(range(N_trials), desc="Trial"):
data = generate_garch_data(T=T, parameters=parameters, tqdm=tqdm)
t0 = (T+L)//2
observations = data['observations']
helper = GARCHHelper(forward_message=data['initial_message'],
**parameters.dim)
# Compute Exact (Full Buffered Gradient)
start_time = time.time()
full_buffer_gradients = [None]*10
pbar = tqdm(range(10))
pbar.set_description('Number of Reps')
buffer_size = 12
pf_kwargs = dict(
observations=observations[t0-buffer_size:t0+L+buffer_size],
parameters=parameters,
kernel=None,
subsequence_start = buffer_size,
subsequence_end = L+buffer_size,
pf='poyiadjis_N',
N=1000000,
tqdm=tqdm,
)
for rep in pbar:
full_buffer_gradients[rep] = convert_gradient(
helper.pf_gradient_estimate(
**pf_kwargs,
))
full_buffer_gradient = np.mean(full_buffer_gradients, axis=0)
full_buffer_gradient_sd = np.std(full_buffer_gradients, axis=0)
print(full_buffer_gradient)
print(full_buffer_gradient_sd)
full_buffer_time = time.time() - start_time
estimates_bs = [dict(
poyiadjis_100=[], poyiadjis_1000=[], poyiadjis_10000=[])
for _ in range(len(buffer_sizes))]
runtimes_bs = [{key:[] for key in estimates_bs[0].keys()}
for _ in range(len(buffer_sizes))]
pbar_bs = tqdm(zip(buffer_sizes, estimates_bs, runtimes_bs),
desc="buffer size",
total=len(buffer_sizes))
for buffer_size, estimates, runtimes in pbar_bs:
pf_kwargs = dict(
observations=observations[t0-buffer_size:t0+L+buffer_size],
parameters=parameters,
kernel=None,
subsequence_start = buffer_size,
subsequence_end = L+buffer_size,
tqdm=tqdm,
)
pbar = tqdm(range(N_reps))
pbar.set_description('Number of Reps')
for rep in pbar:
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=100, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_score_estimate(**pf_kwargs))
estimates['poyiadjis_100'].append(poy_estimate)
runtimes['poyiadjis_100'].append(time.time() - start_time)
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=1000, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_score_estimate(**pf_kwargs))
estimates['poyiadjis_1000'].append(poy_estimate)
runtimes['poyiadjis_1000'].append(time.time() - start_time)
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=10000, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_score_estimate(**pf_kwargs))
estimates['poyiadjis_10000'].append(poy_estimate)
runtimes['poyiadjis_10000'].append(time.time() - start_time)
dfs = []
variables = ['log_mu', 'logit_phi', 'logit_lambduh', 'LRinv_vec']
for buffer_size, estimates, runtimes in zip(buffer_sizes, estimates_bs, runtimes_bs):
for key, value in estimates.items():
df = pd.DataFrame(np.array(value), columns=variables)
df.index.name = 'rep'
df = df.reset_index()
df['runtime'] = runtimes[key]
df = df.melt(id_vars='rep')
df['buffer_size'] = buffer_size
df['sampler'] = key
dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
# Checkpoint
if not os.path.isdir(os.path.join(path_to_out, 'trial')):
os.makedirs(os.path.join(path_to_out, 'trial'))
joblib.dump(df, os.path.join(path_to_out, 'trial',
'dat{0}_joblib.gz'.format(trial)))
# Append Results
for ii, variable in enumerate(variables):
true_grad = full_buffer_gradient[ii]
var_df = df[df['variable'] == variable]
runtime_df = df[df['variable'] == 'runtime']
for (sampler, buffer_size), sub_df in var_df.groupby(
['sampler', 'buffer_size']):
result_df = pd.DataFrame([dict(
sampler=sampler,
buffer_size=buffer_size,
trial=trial,
variable=variable,
mse=np.mean((sub_df['value'] - true_grad)**2),
bias_sq=(np.mean(sub_df['value']) - true_grad)**2,
var=np.var(sub_df['value']),
mean_runtime=np.mean(
runtime_df.query('sampler == @sampler & buffer_size == @buffer_size')['value'])
)])
results_dfs.append(result_df)
# Checkpoint Results
total_result_df = pd.concat(results_dfs, ignore_index=True)
joblib.dump(total_result_df,
os.path.join(path_to_out, 'summary_dat_joblib.gz'))
if trial % 10 == 0:
for variable in variables:
plt.close('all')
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='mse', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient MSE")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_mse.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logmse.png".format(variable)))
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='bias_sq', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient Bias Squared")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_bias.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logbias.png".format(variable)))
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='var', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient Variance")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_var.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logvar.png".format(variable)))
plt.close('all')
### Script
if __name__ == "__main__":
N_reps = 10 #number of repetitions
N_trials = 10
buffer_sizes = np.array([8, 6, 4, 3, 2, 1, 0])
alpha = 0.1
beta = 0.8
gamma = 0.05
tau = 0.3
# Set 1
T = 40 #length of series
L = 16
pars = np.array((alpha, beta, gamma, tau))
path_to_out = os.path.join(
"./scratch/garch_grad_compare/",
"{0}".format(tuple(pars)))
make_plots(T, L, N_reps, N_trials, pars, buffer_sizes, path_to_out)
| 9,330 | 38.041841 | 107 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/nonlinear_ssm_pf_experiment_scripts/gradient_error_fig_scripts/lgssm_grad_compare.py | #!/usr/bin/python3
# This script tests the particle-filter gradient approximations (full data).
#######IMPORT RELEVANT MODULES######################
import numpy as np
import pandas as pd
import time
import joblib
import os
from sgmcmc_ssm.models.lgssm import (
LGSSMParameters,
LGSSMSampler,
LGSSMPrior,
generate_lgssm_data,
LGSSMHelper,
)
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
### Main Function
def make_plots(T, L, N_reps, N_trials, pars, buffer_sizes, path_to_out, seed=12345, save_dat=True):
print("\n===========================================================")
print("T = {0}, L = {4}, N_reps = {1}, N_trial={5}, pars = {2}, buffer_sizes = {3}".format(
T, N_reps, pars, buffer_sizes, L, N_trials))
print("===========================================================\n")
np.random.seed(seed)
# Generate Data
A = np.eye(1) * pars[0]
Q = np.eye(1) * pars[1]
R = np.eye(1) * pars[2]
C = np.eye(1)
LQinv = np.linalg.cholesky(np.linalg.inv(Q))
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = LGSSMParameters(A=A, C=C, LQinv=LQinv, LRinv=LRinv)
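    # Linear-Gaussian SSM: x_t = A x_{t-1} + nu_t with nu_t ~ N(0, Q), and
    # y_t = C x_t + e_t with e_t ~ N(0, R). Unlike the SVM/GARCH scripts, the
    # score is available in closed form via Kalman smoothing, which provides
    # the exact reference gradient below.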
def convert_gradient(grad_dict):
return [
grad_dict['A'].item(),
grad_dict['LQinv_vec'].item(),
grad_dict['LRinv_vec'].item(),
]
results_dfs = []
for trial in tqdm(range(N_trials), desc="Trial"):
data = generate_lgssm_data(T=T, parameters=parameters, tqdm=tqdm)
t0 = (T+L)//2
observations = data['observations']
helper = LGSSMHelper(forward_message=data['initial_message'],
**parameters.dim)
# Compute Exact (Full Buffered Gradient)
start_time = time.time()
full_buffer_gradients = [None]*10
pbar = tqdm(range(10), leave=False)
pbar.set_description('Number of Reps')
buffer_size = L
forward_message = helper.forward_message(
observations=observations[:t0],
parameters=parameters,
)
backward_message = helper.backward_message(
observations=observations[t0+L:],
parameters=parameters,
)
full_buffer_gradient = convert_gradient(
helper.gradient_marginal_loglikelihood(
observations[t0:t0+L], parameters,
forward_message=forward_message,
backward_message=backward_message)
)
full_buffer_time = time.time() - start_time
estimates_bs = [dict(
poyiadjis_100=[], poyiadjis_1000=[], poyiadjis_10000=[], kf=[])
for _ in range(len(buffer_sizes))]
runtimes_bs = [{key:[] for key in estimates_bs[0].keys()}
for _ in range(len(buffer_sizes))]
pbar_bs = tqdm(zip(buffer_sizes, estimates_bs, runtimes_bs),
desc="buffer size",
total=len(buffer_sizes))
for buffer_size, estimates, runtimes in pbar_bs:
pf_kwargs = dict(
observations=observations[t0-buffer_size:t0+L+buffer_size],
parameters=parameters,
kernel=None,
subsequence_start = buffer_size,
subsequence_end = L+buffer_size,
tqdm=tqdm,
)
pbar = tqdm(range(N_reps), leave=False)
pbar.set_description('Number of Reps')
for rep in pbar:
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=100, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_gradient_estimate(**pf_kwargs))
estimates['poyiadjis_100'].append(poy_estimate)
runtimes['poyiadjis_100'].append(time.time() - start_time)
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=1000, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_gradient_estimate(**pf_kwargs))
estimates['poyiadjis_1000'].append(poy_estimate)
runtimes['poyiadjis_1000'].append(time.time() - start_time)
# Poyiadjis N Smoother
start_time = time.time()
pf_kwargs.update(N=10000, pf="poyiadjis_N")
poy_estimate = convert_gradient(helper.pf_gradient_estimate(**pf_kwargs))
estimates['poyiadjis_10000'].append(poy_estimate)
runtimes['poyiadjis_10000'].append(time.time() - start_time)
# Exact KF Smoother
start_time = time.time()
forward_message = helper.forward_message(
observations=pf_kwargs['observations'][:buffer_size],
parameters=parameters,
)
backward_message = helper.backward_message(
observations=pf_kwargs['observations'][L+buffer_size:],
parameters=parameters,
)
grad_dict = helper.gradient_marginal_loglikelihood(
observations=pf_kwargs['observations'][buffer_size:L+buffer_size],
parameters=parameters,
tqdm=tqdm,
forward_message=forward_message,
backward_message=backward_message,
)
kf_mean_estimate = convert_gradient(grad_dict)
estimates['kf'].append(kf_mean_estimate)
runtimes['kf'].append(time.time() - start_time)
dfs = []
variables = ['A', 'LQinv', 'LRinv']
for buffer_size, estimates, runtimes in zip(buffer_sizes, estimates_bs, runtimes_bs):
for key, value in estimates.items():
df = pd.DataFrame(np.array(value), columns=variables)
df.index.name = 'rep'
df = df.reset_index()
df['runtime'] = runtimes[key]
df = df.melt(id_vars='rep')
df['buffer_size'] = buffer_size
df['sampler'] = key
dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
# Checkpoint
if not os.path.isdir(os.path.join(path_to_out, 'trial')):
os.makedirs(os.path.join(path_to_out, 'trial'))
joblib.dump(df, os.path.join(path_to_out, 'trial',
'dat{0}_joblib.gz'.format(trial)))
# Append Results
for ii, variable in enumerate(variables):
true_grad = full_buffer_gradient[ii]
var_df = df[df['variable'] == variable]
runtime_df = df[df['variable'] == 'runtime']
for (sampler, buffer_size), sub_df in var_df.groupby(
['sampler', 'buffer_size']):
result_df = pd.DataFrame([dict(
sampler=sampler,
buffer_size=buffer_size,
trial=trial,
variable=variable,
mse=np.mean((sub_df['value'] - true_grad)**2),
bias_sq=(np.mean(sub_df['value']) - true_grad)**2,
var=np.var(sub_df['value']),
mean_runtime=np.mean(
runtime_df.query('sampler == @sampler & buffer_size == @buffer_size')['value'])
)])
results_dfs.append(result_df)
# Checkpoint Results
total_result_df = pd.concat(results_dfs, ignore_index=True)
joblib.dump(total_result_df,
os.path.join(path_to_out, 'summary_dat_joblib.gz'))
if trial % 10 == 0:
for variable in variables:
plt.close('all')
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='mse', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient MSE")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_mse.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logmse.png".format(variable)))
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='bias_sq', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient Bias Squared")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_bias.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logbias.png".format(variable)))
fig, ax = plt.subplots(1,1)
sns.boxplot(x='sampler', y='var', hue='buffer_size',
data=total_result_df.query('variable == @variable'),
ax=ax)
ax.set_title("Boxplot of Gradient Variance")
fig.set_size_inches(8,6)
fig.savefig(os.path.join(path_to_out, "{0}_var.png".format(variable)))
ax.set_yscale('log')
fig.savefig(os.path.join(path_to_out, "{0}_logvar.png".format(variable)))
plt.close('all')
### Script
if __name__ == "__main__":
N_reps = 100 #number of repetitions
N_trials = 100
buffer_sizes = np.array([8, 6, 4, 2, 1, 0])
A = 0.9
Q = 0.7
R = 1.0
# Set 1
T = 100 #length of series
L = 16
pars = np.array((A, Q, R))
path_to_out = os.path.join(
"./scratch/lgssm_grad_compare/",
"{0}".format(tuple(pars)))
make_plots(T, L, N_reps, N_trials, pars, buffer_sizes, path_to_out)
print("Plots are at {}".format(path_to_out))
| 10,036 | 39.800813 | 107 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/lgssm_quick_start.py | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
np.random.seed(12357)
###############################################################################
# Generate Synthetic Data
###############################################################################
from sgmcmc_ssm.models.lgssm import (
LGSSMParameters,
generate_lgssm_data,
)
T = 1000
## Parameters
A = np.array([[0.9753, -0.0961], [ 0.0961, 0.9753]])
Q = np.eye(2)*0.1
C = np.eye(2)
R = np.eye(2)*0.5
parameters = LGSSMParameters(A=A, C=C, Q=Q, R=R)
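# A is approximately 0.98 times a rotation by 0.098 rad, so the latent state
# is a slowly decaying two-dimensional oscillation.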
## Generate Data
data = generate_lgssm_data(T=T, parameters=parameters)
## Plot Data
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(data['observations'][:,0], '-C0', label='observation')
axes[0].plot(data['latent_vars'][:,0], '--C1', label='latent')
axes[0].set_ylabel("dim = 0")
axes[1].plot(data['observations'][:,1], '-C0', label='observation')
axes[1].plot(data['latent_vars'][:,1], '--C1', label='latent')
axes[1].set_ylabel("dim = 1")
axes[1].set_xlabel("t")
axes[0].legend()
###############################################################################
# Setup Sampler
###############################################################################
from sgmcmc_ssm.models.lgssm import LGSSMSampler
sampler = LGSSMSampler(n=2, m=2, observations=data['observations'])
# Fit Using Gibbs
sampler.prior_init()
print(sampler.exact_logjoint())
sampler.fit(num_iters=30, iter_type='Gibbs', tqdm=tqdm)
print(sampler.exact_logjoint())
## Plot Smoothing Distr for Latent Variables
def plot_distr(mean, cov, data, xmin=0, xmax=200):
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(mean[:,0], '-C1', label='Post Mean')
axes[0].plot(mean[:,0]+cov[:,0,0]**0.5, '--C1', label='Post Mean +/- SD')
axes[0].plot(mean[:,0]-cov[:,0,0]**0.5, '--C1')
axes[0].plot(data['latent_vars'][:,0], '-C0', label='Truth')
axes[0].plot(data['observations'][:,0], 'xC2', label='Observations')
axes[0].legend()
axes[1].plot(mean[:,1], '-C1')
axes[1].plot(mean[:,1]+cov[:,1,1]**0.5, '--C1')
axes[1].plot(mean[:,1]-cov[:,1,1]**0.5, '--C1')
axes[1].plot(data['latent_vars'][:,1], '-C0')
axes[1].plot(data['observations'][:,1], 'xC2')
axes[1].set_xlim(xmin, xmax)
def plot_samples(samples, data, xmin=0, xmax=200):
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(samples[:,0,:], 'C1', alpha=0.05)
axes[0].plot(data['latent_vars'][:,0], '-C0', label='Truth')
axes[0].plot(data['observations'][:,0], 'xC2', label='Observations')
axes[0].legend()
axes[1].plot(samples[:,1,:], '-C1', alpha=0.05)
axes[1].plot(data['latent_vars'][:,1], '-C0')
axes[1].plot(data['observations'][:,1], 'xC2')
axes[1].set_xlim(xmin, xmax)
### Marginal Mean + Covariance
mean_x, cov_x = sampler.predict(target='latent', return_distr=True)
plot_distr(mean_x, cov_x, data)
### Samples from Posterior
xs = sampler.predict(target='latent', num_samples=100)
plot_samples(xs, data)
# Fit Using SGLD
sampler.prior_init()
print(sampler.exact_logjoint())
sampler.fit(num_iters=1000, iter_type='SGLD',
epsilon=0.01, subsequence_length=16, buffer_length=4,
tqdm=tqdm)
print(sampler.exact_logjoint())
# Fit Using SGRLD
sampler.prior_init()
print(sampler.exact_logjoint())
sampler.fit(num_iters=1000, iter_type='SGRLD',
epsilon=0.01, subsequence_length=16, buffer_length=4,
tqdm=tqdm)
print(sampler.exact_logjoint())
# Fit Using ADAGRAD
sampler.prior_init()
print(sampler.exact_logjoint())
sampler.fit(num_iters=1000, iter_type='ADAGRAD',
epsilon=0.1, subsequence_length=16, buffer_length=4,
tqdm=tqdm)
print(sampler.exact_logjoint())
###############################################################################
# Evaluate Sampler
###############################################################################
## Parameters to evaluate
sampler.prior_init()
parameters_list = sampler.fit(num_iters=1000, iter_type='SGRLD',
epsilon=0.1, subsequence_length=16, buffer_length=4,
tqdm=tqdm, output_all=True,
)
## Specify Metric Functions
metric_functions = []
### Loglikelihood and Logjoint
from sgmcmc_ssm.metric_functions import noisy_logjoint_loglike_metric
metric_functions += [noisy_logjoint_loglike_metric()]
### log10 MSE at recovering X
from sgmcmc_ssm.metric_functions import metric_compare_x
metric_functions += [metric_compare_x(data['latent_vars'])]
### log10 MSE at recovering parameters
from sgmcmc_ssm.metric_functions import metric_function_parameters
metric_functions += [
metric_function_parameters(
parameter_names=['A', 'Q', 'R'],
target_values=[parameters.A, parameters.Q, parameters.R],
metric_names = ['logmse', 'logmse', 'logmse'],
)
]
## Specify Sample Functions
from sgmcmc_ssm.metric_functions import sample_function_parameters
sample_functions = sample_function_parameters(
['A', 'Q', 'LQinv', 'R', 'LRinv'],
)
# Offline Evaluation
from sgmcmc_ssm.evaluator import OfflineEvaluator
evaluator = OfflineEvaluator(
sampler=sampler,
parameters_list=parameters_list,
metric_functions=metric_functions, sample_functions=sample_functions,
)
evaluator.evaluate(num_to_eval=40, tqdm=tqdm)
print(evaluator.get_metrics())
print(evaluator.get_samples())
# Plot Results
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator, burnin=10)
plot_trace_plot(evaluator, burnin=10)
###############################################################################
# Compare Multiple Inference Methods
###############################################################################
init = sampler.prior_init()
sampler = LGSSMSampler(n=2, m=2, observations=data['observations'])
max_time = 60
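# Each method below runs for the same wall-clock budget from the same
# initialization, saving a parameter snapshot at most once per second
# (min_save_time=1).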
## Fit Gibbs saving sample every second
gibbs_parameters, gibbs_time = sampler.fit_timed(
iter_type='Gibbs',
init_parameters=init, max_time=max_time, min_save_time=1, tqdm=tqdm,
)
## Fit SGRLD (No Buffer)
nobuffer_parameters, nobuffer_time = sampler.fit_timed(
iter_type='SGRLD',
epsilon=0.1, subsequence_length=8, buffer_length=0,
init_parameters=init, max_time=max_time, min_save_time=1, tqdm=tqdm,
)
## Fit SGRLD (Buffer)
buffer_parameters, buffer_time = sampler.fit_timed(
iter_type='SGRLD',
epsilon=0.1, subsequence_length=8, buffer_length=4,
init_parameters=init, max_time=max_time, min_save_time=1, tqdm=tqdm,
)
## Evaluate
evaluators = {}
from sgmcmc_ssm.evaluator import half_average_parameters_list
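# half_average_parameters_list is assumed to replace each iterate with the
# running average over the second half of the trace, a cheap variance-reduction
# step before evaluation.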
evaluators['Gibbs'] = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(gibbs_parameters),
parameters_times=gibbs_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
evaluators['SGRLD No Buffer'] = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(nobuffer_parameters),
parameters_times=nobuffer_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
evaluators['SGRLD Buffer'] = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(buffer_parameters),
parameters_times=buffer_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
for evaluator in tqdm(evaluators.values()):
evaluator.evaluate(40, tqdm=tqdm)
# Plot Results
from sgmcmc_ssm.plotting_utils import compare_metrics
compare_metrics(evaluators, x='time')
| 7,617 | 33.008929 | 79 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/exchange_rate/exchange_rate_full_demo.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from sgmcmc_ssm.models.svm import SeqSVMSampler
from tqdm import tqdm
np.random.seed(12345)
###############################################################################
# Load and Scale Data
###############################################################################
exchange_data = np.load('data/EURUS_processed.npz')
print(list(exchange_data.keys()))
hourly_log_returns = exchange_data['hourly_log_returns']
hourly_dates = exchange_data['hourly_date']
print(hourly_log_returns.shape)
#from sklearn.preprocessing import RobustScaler
#scaler = RobustScaler()
#observations = scaler.fit_transform(hourly_log_returns.reshape(-1,1))
observations = hourly_log_returns.reshape(-1,1) * 1000
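# Hourly FX log-returns are O(1e-4), so scale by 1000 to keep observations at
# a numerically convenient magnitude; the commented-out RobustScaler above is
# an alternative normalization.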
print(observations.shape)
###############################################################################
# Plot Data
###############################################################################
fig, ax = plt.subplots(1,1)
ax.plot(hourly_dates, observations)
###############################################################################
# Split Data on Gaps > 6 Hour
###############################################################################
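# Gaps longer than 6 hours (e.g., weekend market closures) are treated as
# breaks between independent observation sequences.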
gap_indices = np.where(np.diff(hourly_dates) > pd.Timedelta('6h'))[0].tolist()
split_observations = []
for start, end in zip([0]+gap_indices, gap_indices+[observations.size]):
if end - start > 6:
split_observations.append(observations[start:end])
split_summaries = pd.DataFrame([
dict(max=np.max(obs), mean=np.mean(obs), min=np.min(obs), num=len(obs))
for obs in split_observations])
print(np.around(split_summaries, decimals=2))
###############################################################################
# Fit SVM
###############################################################################
# Evaluate on all segments
sampler = SeqSVMSampler(n=1, m=1, observations=split_observations)
sampler.prior_init()
sampler.project_parameters()
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
def compare_smoothed_pfs(list_of_kwargs, num_segments=5):
# Only compare fit on first num_segments
observations = sampler.observations[0:num_segments]
means_covs = {}
for kwargs in list_of_kwargs:
name = '{0} {1}'.format(kwargs.get('pf','Poyiadjis O(N)'),
kwargs.get('N'))
means_covs[name] = sampler.predict(target='latent', kind='pf',
tqdm=tqdm, observations=observations, **kwargs)
for jj, observation in enumerate(observations):
fig, ax = plt.subplots(1, 1)
for ii, (name, mean_cov) in enumerate(means_covs.items()):
x_mean = mean_cov[jj][0][:,0]
x_cov = mean_cov[jj][1][:,0, 0]
ax.plot(x_mean, '-C{0}'.format(ii), label=name)
ax.plot(x_mean+np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(x_mean-np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(np.log(observation**2)-np.log(sampler.parameters.R), '.k',
label='log(data^2) - log(R)')
ax.legend()
ax.set_title('observations[{}]'.format(jj))
return fig, ax
list_of_kwargs = [
dict(N = 100),
dict(N = 1000),
dict(N = 10000),
dict(pf='paris', N = 100),
dict(pf='paris', N = 1000),
# dict(pf='paris', N = 10000),
]
fig, ax = compare_smoothed_pfs(list_of_kwargs)
# Fit using SGLD
fit_time = 60*60
sgld_parameters, sgld_time = sampler.fit_timed(
iter_type='SGLD',
epsilon=0.001, subsequence_length=16, num_sequences=1, buffer_length=4,
kind='pf', pf_kwargs=dict(pf='poyiadjis_N', N=1000),
max_time=fit_time,
tqdm=tqdm, #tqdm_iter=True,
)
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
# Fit using LD (SGLD with S = T, for all sequences)
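# Restart from the first saved SGLD iterate (the shared initialization) so LD
# and SGLD are compared from the same starting parameters.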
sampler.parameters = sgld_parameters[0].copy()
ld_parameters, ld_time = sampler.fit_timed(
iter_type='SGLD',
epsilon=0.1, subsequence_length=-1, num_sequences=-1, buffer_length=0,
kind='pf', pf_kwargs=dict(pf='paris', N=1000),
max_time=fit_time,
tqdm=tqdm, tqdm_iter=True,
)
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
###############################################################################
# Evaluate Fit
###############################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
)
# Evaluate Loglikelihood on Training Set
metric_functions=[
noisy_logjoint_loglike_metric(tqdm=tqdm, kind='pf', pf='paris', N=1000),
]
sample_functions=sample_function_parameters(['A', 'Q', 'R'])
# Evaluate SGLD samples
sgld_evaluator = OfflineEvaluator(sampler,
parameters_list=sgld_parameters,
parameters_times=sgld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
sgld_evaluator.evaluate(40, tqdm=tqdm)
ld_evaluator = OfflineEvaluator(sampler,
parameters_list=ld_parameters,
parameters_times=ld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
ld_evaluator.evaluate(40, tqdm=tqdm)
# Plot Traces, Metrics, and Compare
from sgmcmc_ssm.plotting_utils import (
plot_trace_plot,
plot_metrics,
compare_metrics,
)
plot_trace_plot(sgld_evaluator)
plot_metrics(sgld_evaluator)
plot_trace_plot(ld_evaluator)
plot_metrics(ld_evaluator)
compare_metrics(dict(
SGLD=sgld_evaluator,
LD=ld_evaluator,
),
x='time',
)
#
| 5,954 | 33.622093 | 79 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/exchange_rate/exchange_rate_subset_demo.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from sgmcmc_ssm.models.svm import SeqSVMSampler
from tqdm import tqdm
np.random.seed(12345)
###############################################################################
# Load and Scale Data
###############################################################################
exchange_data = np.load('data/EURUS_processed.npz')
print(list(exchange_data.keys()))
hourly_log_returns = exchange_data['hourly_log_returns']
hourly_dates = exchange_data['hourly_date']
print(hourly_log_returns.shape)
#from sklearn.preprocessing import RobustScaler
#scaler = RobustScaler()
#observations = scaler.fit_transform(hourly_log_returns.reshape(-1,1))
observations = hourly_log_returns.reshape(-1,1) * 1000
print(observations.shape)
###############################################################################
# Plot Data
###############################################################################
fig, ax = plt.subplots(1,1)
ax.plot(hourly_dates, observations)
###############################################################################
# Split Data on Gaps > 6 Hour
###############################################################################
gap_indices = np.where(np.diff(hourly_dates) > pd.Timedelta('6h'))[0].tolist()
split_observations = []
for start, end in zip([0]+gap_indices, gap_indices+[observations.size]):
if end - start > 6:
split_observations.append(observations[start:end])
split_summaries = pd.DataFrame([
dict(max=np.max(obs), mean=np.mean(obs), min=np.min(obs), num=len(obs))
for obs in split_observations])
print(np.around(split_summaries, decimals=2))
###############################################################################
# Fit SVM
###############################################################################
# Only Fit/Evaluate on the first 5 segments
sampler = SeqSVMSampler(n=1, m=1, observations=split_observations[0:5])
sampler.prior_init()
sampler.project_parameters()
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
def compare_smoothed_pfs(list_of_kwargs):
means_covs = {}
for kwargs in list_of_kwargs:
name = '{0} {1}'.format(kwargs.get('pf','Poyiadjis O(N)'),
kwargs.get('N'))
means_covs[name] = sampler.predict(target='latent', kind='pf',
tqdm=tqdm, **kwargs)
for jj, observation in enumerate(sampler.observations):
fig, ax = plt.subplots(1, 1)
for ii, (name, mean_cov) in enumerate(means_covs.items()):
x_mean = mean_cov[jj][0][:,0]
x_cov = mean_cov[jj][1][:,0, 0]
ax.plot(x_mean, '-C{0}'.format(ii), label=name)
ax.plot(x_mean+np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(x_mean-np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(np.log(observation**2)-np.log(sampler.parameters.R), '.k',
label='log(data^2) - log(R)')
ax.legend()
ax.set_title('observations[{}]'.format(jj))
return fig, ax
list_of_kwargs = [
dict(N = 100),
dict(N = 1000),
dict(N = 10000),
dict(pf='paris', N = 100),
dict(pf='paris', N = 1000),
# dict(pf='paris', N = 10000),
]
fig, ax = compare_smoothed_pfs(list_of_kwargs)
# Fit using SGLD
sgld_parameters, sgld_time = sampler.fit_timed(
iter_type='SGLD',
epsilon=0.001, subsequence_length=16, num_sequences=1, buffer_length=4,
kind='pf', pf_kwargs=dict(pf='poyiadjis_N', N=1000),
max_time=5*60,
tqdm=tqdm, #tqdm_iter=True,
)
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
# Fit using LD (SGLD with S = T, for all sequences)
sampler.parameters = sgld_parameters[0].copy()
ld_parameters, ld_time = sampler.fit_timed(
iter_type='SGLD',
epsilon=0.1, subsequence_length=-1, num_sequences=-1, buffer_length=0,
kind='pf', pf_kwargs=dict(pf='paris', N=1000),
max_time=5*60,
tqdm=tqdm, tqdm_iter=True,
)
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
###############################################################################
# Evaluate Fit
###############################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
)
# Evaluate Loglikelihood on Training Set
metric_functions=[
noisy_logjoint_loglike_metric(tqdm=tqdm, kind='pf', pf='paris', N=1000),
]
sample_functions=sample_function_parameters(['A', 'Q', 'R'])
# Evaluate SGLD samples
sgld_evaluator = OfflineEvaluator(sampler,
parameters_list=sgld_parameters,
parameters_times=sgld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
sgld_evaluator.evaluate(40, tqdm=tqdm)
ld_evaluator = OfflineEvaluator(sampler,
parameters_list=ld_parameters,
parameters_times=ld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
ld_evaluator.evaluate(40, tqdm=tqdm)
# Plot Traces, Metrics, and Compare
from sgmcmc_ssm.plotting_utils import (
plot_trace_plot,
plot_metrics,
compare_metrics,
)
plot_trace_plot(sgld_evaluator)
plot_metrics(sgld_evaluator)
plot_trace_plot(ld_evaluator)
plot_metrics(ld_evaluator)
compare_metrics(dict(
SGLD=sgld_evaluator,
LD=ld_evaluator,
),
x='time',
)
#
| 5,815 | 33.414201 | 79 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/exchange_rate/exchange_rate_single_demo.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from sgmcmc_ssm.models.svm import SeqSVMSampler, SVMSampler
from tqdm import tqdm
np.random.seed(12345)
###############################################################################
# Load and Scale Data
###############################################################################
exchange_data = np.load('data/EURUS_processed.npz')
print(list(exchange_data.keys()))
hourly_log_returns = exchange_data['hourly_log_returns']
hourly_dates = exchange_data['hourly_date']
print(hourly_log_returns.shape)
#from sklearn.preprocessing import RobustScaler
#scaler = RobustScaler()
#observations = scaler.fit_transform(hourly_log_returns.reshape(-1,1))
observations = hourly_log_returns.reshape(-1,1) * 1000
print(observations.shape)
###############################################################################
# Plot Data
###############################################################################
fig, ax = plt.subplots(1,1)
ax.plot(hourly_dates, observations)
###############################################################################
# Split Data on Gaps > 6 Hour
###############################################################################
gap_indices = np.where(np.diff(hourly_dates) > pd.Timedelta('6h'))[0].tolist()
split_observations = []
for start, end in zip([0]+gap_indices, gap_indices+[observations.size]):
if end - start > 6:
split_observations.append(observations[start:end])
split_summaries = pd.DataFrame([
dict(max=np.max(obs), mean=np.mean(obs), min=np.min(obs), num=len(obs))
for obs in split_observations])
print(np.around(split_summaries, decimals=2))
###############################################################################
# Fit SVM
###############################################################################
# Only Evaluate on the second segment
fig, ax = plt.subplots(1,1)
ax.plot(split_observations[1])
sampler = SVMSampler(n=1, m=1, observations=split_observations[1])
sampler.prior_init()
sampler.project_parameters()
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
def compare_smoothed_pfs(sampler, list_of_kwargs):
means_covs = {}
for kwargs in list_of_kwargs:
name = '{0} {1}'.format(kwargs.get('pf','Poyiadjis O(N)'),
kwargs.get('N'))
x_mean, x_cov = sampler.predict(target='latent', kind='pf',
tqdm=tqdm, **kwargs)
x_mean, x_cov = x_mean[:,0], x_cov[:, 0,0]
means_covs[name] = x_mean, x_cov
fig, ax = plt.subplots(1, 1)
for ii, (name, (x_mean, x_cov)) in enumerate(means_covs.items()):
ax.plot(x_mean, '-C{0}'.format(ii), label=name)
ax.plot(x_mean+np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(x_mean-np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(np.log(sampler.observations**2)-np.log(sampler.parameters.R), '.k',
label='log(data^2) - log(R)')
ax.legend()
return fig, ax
list_of_kwargs = [
dict(N = 100),
dict(N = 1000),
dict(N = 10000),
dict(pf='paris', N = 100),
dict(pf='paris', N = 1000),
# dict(pf='paris', N = 10000),
]
fig, ax = compare_smoothed_pfs(sampler, list_of_kwargs)
# Fit using SGLD
sgld_parameters, sgld_time = sampler.fit_timed(
iter_type='SGLD',
epsilon=0.001, subsequence_length=16, buffer_length=4,
kind='pf', pf_kwargs=dict(pf='poyiadjis_N', N=1000),
max_time=5*60,
tqdm=tqdm, #tqdm_iter=True,
)
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
# Fit using LD (SGLD with S = T)
sampler.parameters = sgld_parameters[0].copy()
ld_parameters, ld_time = sampler.fit_timed(
iter_type='SGLD',
epsilon=0.1, subsequence_length=-1, buffer_length=0,
kind='pf', pf_kwargs=dict(pf='paris', N=1000),
max_time=5*60,
tqdm=tqdm, tqdm_iter=True,
)
print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
return_loglike=True, tqdm=tqdm))
###############################################################################
# Evaluate Fit
###############################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
)
# Evaluate Loglikelihood on Training Set
metric_functions=[
noisy_logjoint_loglike_metric(tqdm=tqdm, kind='pf', pf='paris', N=1000),
]
sample_functions=sample_function_parameters(['A', 'Q', 'R'])
# Evaluate SGLD samples
sgld_evaluator = OfflineEvaluator(sampler,
parameters_list=sgld_parameters,
parameters_times=sgld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
sgld_evaluator.evaluate(40, tqdm=tqdm)
ld_evaluator = OfflineEvaluator(sampler,
parameters_list=ld_parameters,
parameters_times=ld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
ld_evaluator.evaluate(40, tqdm=tqdm)
# Plot Traces, Metrics, and Compare
from sgmcmc_ssm.plotting_utils import (
plot_trace_plot,
plot_metrics,
compare_metrics,
)
plot_trace_plot(sgld_evaluator)
plot_metrics(sgld_evaluator)
plot_trace_plot(ld_evaluator)
plot_metrics(ld_evaluator)
compare_metrics(dict(
SGLD=sgld_evaluator,
LD=ld_evaluator,
),
x='time',
)
#
| 5,702 | 33.355422 | 79 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/exchange_rate/process_exchange_data.py | import pandas as pd
import numpy as np
import os
###############################################################################
# Process Raw Exchange Rate Data Pulled From Finam.RU
###############################################################################
path_to_raw_data = './data/EURUS_data.csv'
path_to_output_data = './data/EURUS_processed.npz'
df = pd.read_csv(path_to_raw_data, dtype={'<DATE>': str, '<TIME>':str})
df['date'] = pd.to_datetime(df['<DATE>'] + df['<TIME>'], format="%Y%m%d%H%M%S")
prices = df[['date', '<CLOSE>']]
prices.columns = ['date', 'close']
# Minute Returns
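# Demeaned log-returns: r_t = log(p_t) - log(p_{t-1}) - mean over t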
minute_series = pd.DataFrame(prices['close'].map(np.log).diff() - np.mean(prices['close'].map(np.log).diff()))
minute_series['date'] = prices['date']
minute_series = minute_series.iloc[1:]
minute_series.columns = ['log_returns', 'date']
# Hourly Returns
hourly_price = prices.copy()
hourly_price['date'] = hourly_price['date'].map(lambda x: x.replace(minute=0))
hourly_price = hourly_price.groupby(['date']).nth(0).reset_index()
hourly_series = pd.DataFrame(hourly_price['close'].map(np.log).diff() - np.mean(hourly_price['close'].map(np.log).diff()))
hourly_series['date'] = hourly_price['date']
hourly_series = hourly_series.iloc[1:]
hourly_series.columns = ['log_returns', 'date']
# Daily Returns
daily_price = prices.copy()
daily_price['date'] = daily_price['date'].map(lambda x: x.replace(hour=0, minute=0))
daily_price = daily_price.groupby(['date']).nth(0).reset_index()
daily_series = pd.DataFrame(daily_price['close'].map(np.log).diff() - np.mean(daily_price['close'].map(np.log).diff()))
daily_series['date'] = daily_price['date']
daily_series = daily_series.iloc[1:]
daily_series.columns = ['log_returns', 'date']
# Save Data
data = dict(
minute_log_returns=minute_series['log_returns'],
minute_date=np.array(minute_series['date'], dtype='datetime64[m]'),
hourly_log_returns=hourly_series['log_returns'],
hourly_date=np.array(hourly_series['date'], dtype='datetime64[h]'),
daily_log_returns=daily_series['log_returns'],
daily_date=np.array(daily_series['date'], dtype='datetime64[D]'),
)
np.savez_compressed(path_to_output_data, **data)
| 2,209 | 39.181818 | 122 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/synthetic/lgssm_demo.py | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import joblib
import logging
import os
from tqdm import tqdm
from sgmcmc_ssm.models.lgssm import (
generate_lgssm_data,
LGSSMParameters,
LGSSMPrior,
LGSSMPreconditioner,
LGSSMHelper,
LGSSMSampler,
)
from sgmcmc_ssm.plotting_utils import (
compare_metrics,
plot_trace_plot,
)
sns.set()
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
## =========================================================
path_to_save ="scratch/LGSSM_demo/"
num_inits = 1
np.random.seed(12345)
## =========================================================
# Parameters
## Define Gaussian HMM Parameters
A = np.eye(2)*0.9
Q = np.eye(2)*1.0
C = np.eye(2)
R = np.eye(2)*0.1
LQinv = np.linalg.cholesky(np.linalg.inv(Q))
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = LGSSMParameters(A=A, C=C, LQinv=LQinv, LRinv=LRinv)
# Generate Data
my_data = generate_lgssm_data(T=1000, parameters=parameters)
# Generate Samplers
preconditioner = LGSSMPreconditioner()
sampler_steps = {
"SGRLD (buffer)": [
['sample_sgrld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 2,
'minibatch_size': 10, 'preconditioner': preconditioner, }, {}]*10
],
"SGRLD (no buffer)": [
['sample_sgrld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 0,
'minibatch_size': 10, 'preconditioner': preconditioner}, {}]*10
],
"SGLD (buffer)": [
['sample_sgld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 2,
'minibatch_size': 10}, {}]*10
],
"SGLD (no buffer)": [
['sample_sgld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 0,
'minibatch_size': 10}, {}]*10
],
"Gibbs": [
['sample_gibbs', 'project_parameters'],
[{}, {}],
],
}
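# Each entry maps a sampler name to (list of step methods, per-step kwargs);
# the SG-MCMC variants take 10 stochastic-gradient steps per outer iteration
# while Gibbs performs a single full sweep, roughly equalizing work per
# iteration.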
my_samplers = {
(key, init): LGSSMSampler(name=key, **parameters.dim)
for key in sampler_steps.keys()
for init in range(num_inits)
}
my_prior = LGSSMPrior.generate_default_prior(var=1, **parameters.dim)
for key, sampler in my_samplers.items():
    sampler.setup(my_data['observations'], my_prior)
sampler.project_parameters()
# Init Samplers from Prior
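# Only the first sampler is used to draw the shared initialization(s); the
# trailing `break` below is intentional so every sampler starts from the same
# parameters.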
init_parameters = {}
for _, sampler in my_samplers.items():
for init in tqdm(range(num_inits)):
sampler.sample_gibbs()
init_parameter = sampler.parameters.project_parameters()
init_parameters[init] = init_parameter.copy()
break
for (key, init), sampler in my_samplers.items():
init_param = init_parameters[init].copy()
sampler.parameters = init_param
# Setup my_evaluators
from sgmcmc_ssm.evaluator import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
metric_function_from_sampler,
metric_function_parameters,
metric_compare_x,
noisy_logjoint_loglike_metric,
sample_function_parameters,
)
parameter_names2 = ['A', 'C', 'Q', 'R']
my_metric_functions = [
metric_function_parameters(parameter_names2,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names2],
metric_names=['mse' for parameter_name in parameter_names2],
),
metric_compare_x(my_data['latent_vars']),
metric_function_from_sampler("predictive_loglikelihood"),
noisy_logjoint_loglike_metric(),
]
my_sample_functions = [
sample_function_parameters(
parameter_names2 + ['LRinv', 'LQinv']
),
]
my_evaluators = {
"{0}_{1}".format(*key): SamplerEvaluator(sampler,
my_metric_functions, my_sample_functions,
sampler_name="{0}_{1}".format(*key))
for key, sampler in my_samplers.items()
}
keys = my_evaluators.keys()
for step in tqdm(range(1000)):
for ii, key in enumerate(keys):
my_evaluators[key].evaluate_sampler_step(*sampler_steps[key.split("_")[0]])
if (step % 25) == 0:
logging.info("============= CHECKPOINT ================")
if not os.path.isdir(path_to_save):
os.makedirs(path_to_save)
joblib.dump({
key: evaluator.get_state()
for key, evaluator in my_evaluators.items()},
os.path.join(path_to_save, "lgssm_demo.p"))
g = compare_metrics(my_evaluators)
g.savefig(os.path.join(path_to_save, "metrics_compare.png"))
if step > 50:
g = compare_metrics(my_evaluators, full_trace=False)
g.savefig(os.path.join(path_to_save, "metrics_compare_zoom.png"))
for key in my_evaluators.keys():
sampler = my_evaluators[key].sampler
fig, axes = plot_trace_plot(my_evaluators[key])
fig.suptitle(key)
fig.savefig(os.path.join(path_to_save, "{0}_trace.png".format(key)))
plt.close('all')
| 5,352 | 30.674556 | 83 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/synthetic/hmm_demo.py | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import joblib
import logging
import os
from tqdm import tqdm
from sgmcmc_ssm.models.gauss_hmm import (
generate_gausshmm_data,
GaussHMMParameters,
GaussHMMPrior,
GaussHMMPreconditioner,
GaussHMMHelper,
GaussHMMSampler,
)
from sgmcmc_ssm.plotting_utils import (
compare_metrics,
plot_trace_plot,
)
sns.set()
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
## =========================================================
path_to_save ="scratch/GaussHMM_demo/"
num_inits = 1
np.random.seed(12345)
## =========================================================
# Parameters
## Define Gaussian HMM Parameters
pi = np.array([[0.9, 0.1], [0.1, 0.9]])
logit_pi = np.log(pi + 1e-5)
mu = np.array([[1, -1], [-1, 1]])
R = np.array([np.eye(2) for _ in range(2)])
LRinv = np.array([np.linalg.cholesky(np.linalg.inv(R_k)) for R_k in R])
parameters = GaussHMMParameters(logit_pi=logit_pi, mu=mu, LRinv=LRinv)
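# Two-state Gaussian HMM with sticky transitions (0.9 self-transition
# probability) and well-separated emission means.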
# Generate Data
my_data = generate_gausshmm_data(T=1000, parameters=parameters)
# Generate Samplers
preconditioner = GaussHMMPreconditioner(expanded_pi_base = 0.1)
sampler_steps = {
"SGRLD (buffer)": [
['sample_sgrld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 2,
'minibatch_size': 10, 'preconditioner': preconditioner, }, {}]*10
],
"SGRLD (no buffer)": [
['sample_sgrld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 0,
'minibatch_size': 10, 'preconditioner': preconditioner}, {}]*10
],
"SGLD (buffer)": [
['sample_sgld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 2,
'minibatch_size': 10}, {}]*10
],
"SGLD (no buffer)": [
['sample_sgld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 2, 'buffer_length': 0,
'minibatch_size': 10}, {}]*10
],
"Gibbs": [
['sample_gibbs', 'project_parameters'],
[{}, {}],
],
}
my_samplers = {
(key, init): GaussHMMSampler(name=key, **parameters.dim)
for key in sampler_steps.keys()
for init in range(num_inits)
}
my_prior = GaussHMMPrior.generate_default_prior(var=1, **parameters.dim)
for key, sampler in my_samplers.items():
    sampler.setup(my_data['observations'], my_prior)
if key[0].startswith("SGRLD"):
sampler.parameters.pi_type = "expanded"
sampler.project_parameters()
# Init Samplers from Prior
init_parameters = {}
for _, sampler in my_samplers.items():
for init in tqdm(range(num_inits)):
sampler.sample_gibbs()
init_parameter = sampler.parameters.project_parameters()
init_parameters[init] = init_parameter.copy()
break
for (key, init), sampler in my_samplers.items():
init_param = init_parameters[init].copy()
if key.startswith('SGRLD'):
init_param.pi_type = 'expanded'
else:
init_param.pi_type = 'logit'
sampler.parameters = init_param
# Setup my_evaluators
from sgmcmc_ssm.evaluator import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
metric_function_from_sampler,
metric_function_parameters,
metric_compare_z,
noisy_logjoint_loglike_metric,
sample_function_parameters,
)
parameter_names = ['pi']
parameter_names2 = ['mu', 'R']
my_metric_functions = [
metric_function_parameters(parameter_names,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names=['logmse' for parameter_name in parameter_names],
criteria=[min for parameter_name in parameter_names],
double_permutation_flag=True,
),
metric_function_parameters(parameter_names,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names=['mse' for parameter_name in parameter_names],
criteria=[min for parameter_name in parameter_names],
double_permutation_flag=True,
),
metric_function_parameters(parameter_names2,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names2],
metric_names=['mse' for parameter_name in parameter_names2],
criteria=[min for parameter_name in parameter_names2],
),
metric_compare_z(my_data['latent_vars']),
metric_function_from_sampler("predictive_loglikelihood"),
noisy_logjoint_loglike_metric(),
]
my_sample_functions = [
sample_function_parameters(
parameter_names + parameter_names2 + ['expanded_pi']
),
]
my_evaluators = {
"{0}_{1}".format(*key): SamplerEvaluator(sampler,
my_metric_functions, my_sample_functions,
sampler_name="{0}_{1}".format(*key))
for key, sampler in my_samplers.items()
}
keys = my_evaluators.keys()
for step in tqdm(range(1000)):
for ii, key in enumerate(keys):
my_evaluators[key].evaluate_sampler_step(*sampler_steps[key.split("_")[0]])
if (step % 25) == 0:
logging.info("============= CHECKPOINT ================")
if not os.path.isdir(path_to_save):
os.makedirs(path_to_save)
joblib.dump({
key: evaluator.get_state()
for key, evaluator in my_evaluators.items()},
os.path.join(path_to_save, "gauss_hmm_demo.p"))
g = compare_metrics(my_evaluators)
g.savefig(os.path.join(path_to_save, "metrics_compare.png"))
if step > 50:
g = compare_metrics(my_evaluators, full_trace=False)
g.savefig(os.path.join(path_to_save, "metrics_compare_zoom.png"))
for key in my_evaluators.keys():
sampler = my_evaluators[key].sampler
fig, axes = plot_trace_plot(my_evaluators[key])
fig.suptitle(key)
fig.savefig(os.path.join(path_to_save, "{0}_trace.png".format(key)))
plt.close('all')
| 6,538 | 33.597884 | 83 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/synthetic/slds_demo.py | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import joblib
import logging
import os
from tqdm import tqdm
from sgmcmc_ssm.models.slds import (
generate_slds_data,
SLDSParameters,
SLDSPrior,
SLDSPreconditioner,
SLDSHelper,
SLDSSampler,
)
from sgmcmc_ssm.plotting_utils import (
compare_metrics,
plot_trace_plot,
)
sns.set()
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
## =========================================================
path_to_save ="scratch/SLDS_demo/"
num_inits = 1
np.random.seed(12345)
## =========================================================
# Parameters
## Define SLDS Parameters
alpha = 0.1
delta = np.pi/2
pi = np.array([
[1-alpha, alpha],
[alpha, 1-alpha]])
rot_mat = lambda rho: np.array([[np.cos(rho), -np.sin(rho)], [np.sin(rho), np.cos(rho)]])
A = np.array([rot_mat(-delta/2), rot_mat(delta/2)]) * 0.9
Q = np.array([np.eye(2), np.eye(2)]) * 0.1
C = np.eye(2)
R = np.eye(2)*0.1
logit_pi = np.log(pi+0.000001)
LQinv = np.array([np.linalg.cholesky(np.linalg.inv(Q_k)) for Q_k in Q])
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = SLDSParameters(logit_pi=logit_pi, A=A, C=C, LQinv=LQinv, LRinv=LRinv)
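# Optional sanity check: each A_k is a rotation scaled by 0.9, so its
# eigenvalues have modulus 0.9 < 1 and the latent dynamics are stable:
assert all(np.max(np.abs(np.linalg.eigvals(A_k))) < 1 for A_k in A)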
# Generate Data
my_data = generate_slds_data(T=500, parameters=parameters)
# Generate Samplers
preconditioner = SLDSPreconditioner()
sampler_steps = {
"SGRLD XZ (buffer)": [
['sample_sgrld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 5, 'buffer_length': 5,
'kind': 'complete', 'minibatch_size': 10,
'preconditioner': preconditioner}, {}]*10,
],
"SGRLD X (buffer)": [
['sample_sgrld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 5, 'buffer_length': 5,
'kind': 'x_marginal', 'minibatch_size': 10,
'preconditioner': preconditioner}, {}]*10,
],
"SGRLD Z (buffer)": [
['sample_sgrld', 'project_parameters']*10,
[{'epsilon': 0.1, 'subsequence_length': 5, 'buffer_length': 5,
'kind': 'z_marginal', 'minibatch_size': 10,
'preconditioner': preconditioner}, {}]*10,
],
"Gibbs": [
['sample_gibbs', 'project_parameters'],
[{}, {}],
],
}
my_samplers = {
(key, init): SLDSSampler(name=key, **parameters.dim)
for key in sampler_steps.keys()
for init in range(num_inits)
}
my_prior = SLDSPrior.generate_default_prior(var=1, **parameters.dim)
for key, sampler in my_samplers.items():
    sampler.setup(my_data['observations'], my_prior)
sampler.init_sample_latent()
sampler.project_parameters()
# Init Samplers from Prior
# (As in the HMM demo, Gibbs steps are run on the first sampler only --
# hence the break -- and its draws seed every sampler's initialization)
init_parameters = {}
for _, sampler in my_samplers.items():
    for init in tqdm(range(num_inits)):
        sampler.sample_gibbs()
        init_parameter = sampler.parameters.project_parameters()
        init_parameters[init] = init_parameter.copy()
    break
for (key, init), sampler in my_samplers.items():
init_param = init_parameters[init].copy()
sampler.parameters = init_param
# Setup my_evaluators
from sgmcmc_ssm.evaluator import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
metric_function_from_sampler,
metric_function_parameters,
metric_compare_x,
metric_compare_z,
noisy_logjoint_loglike_metric,
sample_function_parameters,
)
parameter_names = ['pi']
parameter_names2 = ['A', 'Q']
parameter_names3 = ['C', 'R']
my_metric_functions = [
metric_function_parameters(parameter_names,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names=['logmse' for parameter_name in parameter_names],
criteria=[min for parameter_name in parameter_names],
double_permutation_flag=True,
),
metric_function_parameters(parameter_names,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names],
metric_names=['mse' for parameter_name in parameter_names],
criteria=[min for parameter_name in parameter_names],
double_permutation_flag=True,
),
metric_function_parameters(parameter_names2,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names2],
metric_names=['mse' for parameter_name in parameter_names2],
criteria=[min for parameter_name in parameter_names2],
),
metric_function_parameters(parameter_names3,
target_values=[getattr(my_data['parameters'], parameter_name)
for parameter_name in parameter_names3],
        metric_names=['mse' for parameter_name in parameter_names3],
),
# metric_compare_x(my_data['latent_vars']['x']),
# metric_compare_z(my_data['latent_vars']['z']),
noisy_logjoint_loglike_metric(subsequence_length=50, buffer_length=10),
]
my_sample_functions = [
sample_function_parameters(
parameter_names2 + ['LRinv', 'LQinv']
),
]
my_evaluators = {
"{0}_{1}".format(*key): SamplerEvaluator(sampler,
my_metric_functions, my_sample_functions,
sampler_name="{0}_{1}".format(*key))
for key, sampler in my_samplers.items()
}
keys = my_evaluators.keys()
for step in tqdm(range(1000)):
for ii, key in enumerate(keys):
my_evaluators[key].evaluate_sampler_step(*sampler_steps[key.split("_")[0]])
if (step % 25) == 0:
logging.info("============= CHECKPOINT ================")
if not os.path.isdir(path_to_save):
os.makedirs(path_to_save)
joblib.dump({
key: evaluator.get_state()
for key, evaluator in my_evaluators.items()},
os.path.join(path_to_save, "slds_demo.p"))
g = compare_metrics(my_evaluators)
g.savefig(os.path.join(path_to_save, "metrics_compare.png"))
if step > 50:
g = compare_metrics(my_evaluators, full_trace=False)
g.savefig(os.path.join(path_to_save, "metrics_compare_zoom.png"))
for key in my_evaluators.keys():
sampler = my_evaluators[key].sampler
fig, axes = plot_trace_plot(my_evaluators[key], single_variables=['C', 'LRinv', 'R', 'Rinv'])
fig.suptitle(key)
fig.savefig(os.path.join(path_to_save, "{0}_trace.png".format(key)))
plt.close('all')
| 6,876 | 33.557789 | 105 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/ion_channel/ion_channel_subset_demo.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sgmcmc_ssm.models.gauss_hmm import GaussHMMSampler
from tqdm import tqdm
np.random.seed(12345)
# Load and Scale Data
from scipy.io import loadmat
ion_data = loadmat('data/alamethicin.mat')
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
observations = scaler.fit_transform(ion_data['originaldata'][1095:-3000])
filtered_observations = scaler.transform(ion_data['filtereddata'])
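# RobustScaler centers by the median and scales by the interquartile
# range, so heavy-tailed channel openings do not dominate the scale the
# way they would under mean/std standardization; e.g. (illustrative only):
# RobustScaler().fit_transform(np.array([[1.], [2.], [3.], [100.]]))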
# Plot Data
fig, ax = plt.subplots(1,1)
ax.plot(observations[-500000::50], '-', label='scaled data')
ax.plot(filtered_observations[-500000::50], '-', label='scaled filtered data')
ax.set_title('Scaled Downsampled Ion Data')
ax.set_xlabel('Time')
ax.set_ylabel('Voltage (Scaled)')
ax.legend()
# Only Process a subset for this example
Y = observations[-500000::50]
filtered_Y = filtered_observations[-500000::50]
# Fit Gauss HMM using Gibbs Sampling
sampler = GaussHMMSampler(num_states=6, m=1, observations=Y)
sampler.init_parameters_from_k_means(observations=Y[:10000], n_init=20)
gibbs_parameters, gibbs_time = sampler.fit_timed(iter_type='Gibbs',
max_time=60,
tqdm=tqdm,
)
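# fit_timed appears to return the list of parameter samples together with
# matching wall-clock timestamps (see how gibbs_parameters and gibbs_time
# are passed to OfflineEvaluator below), e.g.:
# print(len(gibbs_parameters), "samples; last time:", gibbs_time[-1])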
# Compare Fit at Init and Final
def compare_inference(parameters, sampler, Y, filtered_Y, tqdm=None):
sampler.parameters = parameters.copy()
z_prob = sampler.predict(
observations=Y,
target='latent', return_distr=True, tqdm=tqdm,
)
z_map = np.argmax(z_prob, axis=1)
fig, axes = plt.subplots(2,1, sharex=True, gridspec_kw={'height_ratios':[3,1]})
axes[0].plot(Y, '-k', label='observations')
for k in range(sampler.num_states):
x = Y.copy()
x[z_map != k] = np.nan
axes[0].plot(x, '-C{}'.format(k), label='MAP(z) = {}'.format(k))
axes[0].plot(filtered_Y, '--', color='gray', label='filtered observations')
axes[0].legend()
axes[1].plot(z_prob, '-')
axes[1].set_ylabel('Latent Prob')
return fig, axes
fig, axes = compare_inference(gibbs_parameters[0], sampler, Y, filtered_Y)
fig.suptitle('Init Gibbs Fit')
fig, axes = compare_inference(gibbs_parameters[-1], sampler, Y, filtered_Y)
fig.suptitle('Final Gibbs Fit')
# Fit Gauss HMM using SGRLD
sampler = GaussHMMSampler(num_states=6, m=1, observations=Y)
sampler.init_parameters_from_k_means(observations=Y[:10000], n_init=20)
sampler.parameters.pi_type = 'expanded'
sgrld_parameters, sgrld_time = sampler.fit_timed(
iter_type='SGRLD',
max_time=60,
epsilon=0.001, subsequence_length=4, buffer_length=2,
tqdm=tqdm,
)
# Compare Fit at Init and Final
fig, axes = compare_inference(sgrld_parameters[0], sampler, Y, filtered_Y)
fig.suptitle('Init SGRLD Fit')
fig, axes = compare_inference(sgrld_parameters[-1], sampler, Y, filtered_Y)
fig.suptitle('Final SGRLD Fit')
################################################################################
# Sampler Evaluation
################################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
noisy_predictive_logjoint_loglike_metric,
)
# Evaluate Loglikelihood and Predictive Loglikelihood
metric_functions=[
noisy_logjoint_loglike_metric(),
noisy_predictive_logjoint_loglike_metric(num_steps_ahead=3),
]
sample_functions=sample_function_parameters(['pi', 'logit_pi', 'mu', 'R'])
# Evaluate Gibbs samples
gibbs_evaluator = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(gibbs_parameters),
parameters_times=gibbs_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
gibbs_evaluator.evaluate(16, tqdm=tqdm)
# Evaluate SGRLD samples
sgrld_evaluator = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(sgrld_parameters),
parameters_times=sgrld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
sgrld_evaluator.evaluate(16, tqdm=tqdm)
# Plot Traces, Metrics, and Compare
from sgmcmc_ssm.plotting_utils import (
plot_trace_plot,
plot_metrics,
compare_metrics,
)
plot_trace_plot(gibbs_evaluator)
plot_metrics(gibbs_evaluator)
plot_trace_plot(sgrld_evaluator)
plot_metrics(sgrld_evaluator)
compare_metrics(dict(
Gibbs=gibbs_evaluator,
SGRLD=sgrld_evaluator,
),
x='time',
)
# EOF
| 4,577 | 31.013986 | 83 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/ion_channel/ion_channel_downsample_demo.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sgmcmc_ssm.models.gauss_hmm import GaussHMMSampler
from tqdm import tqdm
np.random.seed(12345)
# Load and Scale Data
from scipy.io import loadmat
ion_data = loadmat('data/alamethicin.mat')
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
observations = scaler.fit_transform(ion_data['originaldata'][1095:-3000])
filtered_observations = scaler.transform(ion_data['filtereddata'])
T = len(observations)
# Plot Data
fig, ax = plt.subplots(1,1)
ax.plot(np.arange(T)[::50], observations[::50], '-', label='scaled data')
ax.plot(np.arange(T)[::50], filtered_observations[::50], '-', label='scaled filtered data')
ax.set_title('Scaled Ion Data')
ax.set_xlabel('Time')
ax.set_ylabel('Voltage (Scaled)')
ax.legend()
# Process Downsampled
Y = observations[:-1000000:50]
filtered_Y = filtered_observations[:-1000000:50]
Y_test = observations[-1000000::50]
filtered_Y_test = filtered_observations[-1000000::50]
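# (The last 1,000,000 raw samples are held out as a test set; both splits
# are thinned by a factor of 50, matching the plotting stride above.)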
# Fit Gauss HMM using Gibbs Sampling (this is slow)
sampler = GaussHMMSampler(num_states=6, m=1, observations=Y)
sampler.init_parameters_from_k_means(observations=Y, n_init=20, verbose=True)
gibbs_parameters, gibbs_time = sampler.fit_timed(iter_type='Gibbs',
max_time=5*60,
tqdm=tqdm, tqdm_iter=True,
)
# Compare Fit at Init and Final on subset
def compare_inference(parameters, sampler, Y, filtered_Y, tqdm=None):
sampler.parameters = parameters.copy()
z_prob = sampler.predict(
observations=Y,
target='latent', return_distr=True, tqdm=tqdm,
)
z_map = np.argmax(z_prob, axis=1)
fig, axes = plt.subplots(2,1, sharex=True, gridspec_kw={'height_ratios':[3,1]})
axes[0].plot(Y, '-k', label='observations')
for k in range(sampler.num_states):
x = Y.copy()
x[z_map != k] = np.nan
axes[0].plot(x, '-C{}'.format(k), label='MAP(z) = {}'.format(k))
axes[0].plot(filtered_Y, '--', color='gray', label='filtered observations')
axes[0].legend()
axes[1].plot(z_prob, '-')
axes[1].set_ylabel('Latent Prob')
return fig, axes
# Compare Fit at Init and Final On Test Set
fig, axes = compare_inference(gibbs_parameters[0], sampler,
Y_test, filtered_Y_test, tqdm=tqdm)
fig.suptitle('Init Gibbs Fit')
fig, axes = compare_inference(gibbs_parameters[-1], sampler,
Y_test, filtered_Y_test, tqdm=tqdm)
fig.suptitle('Final Gibbs Fit')
# Fit Gauss HMM using SGRLD
sampler = GaussHMMSampler(num_states=6, m=1, observations=Y)
sampler.parameters = gibbs_parameters[0].copy()
sampler.parameters.pi_type = 'expanded'
sgrld_parameters, sgrld_time = sampler.fit_timed(
iter_type='SGRLD',
max_time=5*60,
epsilon=0.01, subsequence_length=4, buffer_length=2,
tqdm=tqdm,
)
# Compare Fit at Init and Final
fig, axes = compare_inference(sgrld_parameters[0], sampler,
Y_test, filtered_Y_test, tqdm=tqdm)
fig.suptitle('Init SGRLD Fit')
fig, axes = compare_inference(sgrld_parameters[-1], sampler,
Y_test, filtered_Y_test, tqdm=tqdm)
fig.suptitle('Final SGRLD Fit')
################################################################################
# Sampler Evaluation
################################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
noisy_predictive_logjoint_loglike_metric,
)
# Evaluate Loglikelihood on Training and Predictive Loglikelihood on Test
metric_functions=[
noisy_logjoint_loglike_metric(tqdm=tqdm, observations=Y),
noisy_predictive_logjoint_loglike_metric(
num_steps_ahead=3, observations=Y_test, tqdm=tqdm),
]
sample_functions=sample_function_parameters(['pi', 'logit_pi', 'mu', 'R'])
# Evaluate Gibbs samples
gibbs_evaluator = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(gibbs_parameters),
parameters_times=gibbs_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
gibbs_evaluator.evaluate(16, tqdm=tqdm)
# Evaluate SGRLD samples
sgrld_evaluator = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(sgrld_parameters),
parameters_times=sgrld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
sgrld_evaluator.evaluate(16, tqdm=tqdm)
# Plot Traces, Metrics, and Compare
from sgmcmc_ssm.plotting_utils import (
plot_trace_plot,
plot_metrics,
compare_metrics,
)
plot_trace_plot(gibbs_evaluator)
plot_metrics(gibbs_evaluator)
plot_trace_plot(sgrld_evaluator)
plot_metrics(sgrld_evaluator)
compare_metrics(dict(
Gibbs=gibbs_evaluator,
SGRLD=sgrld_evaluator,
),
x='time',
)
# EOF
| 4,961 | 31.860927 | 91 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/ion_channel/ion_channel_full_demo.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sgmcmc_ssm.models.gauss_hmm import GaussHMMSampler
from tqdm import tqdm
np.random.seed(12345)
# Load and Scale Data
from scipy.io import loadmat
ion_data = loadmat('data/alamethicin.mat')
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
observations = scaler.fit_transform(ion_data['originaldata'][1095:-3000])
filtered_observations = scaler.transform(ion_data['filtereddata'])
T = len(observations)
# Plot Data
fig, ax = plt.subplots(1,1)
ax.plot(np.arange(T)[::50], observations[::50], '-', label='scaled data')
ax.plot(np.arange(T)[::50], filtered_observations[::50], '-', label='scaled filtered data')
ax.set_title('Scaled Ion Data')
ax.set_xlabel('Time')
ax.set_ylabel('Voltage (Scaled)')
ax.legend()
# Process all
Y = observations[:-1000000]
filtered_Y = filtered_observations[:-1000000]
Y_test = observations[-1000000:]
filtered_Y_test = filtered_observations[-1000000:]
# Fitting the Gauss HMM by Gibbs sampling is not feasible at this scale
# (roughly 1 hour per iteration), so the block below is left commented out
#sampler = GaussHMMSampler(num_states=6, m=1, observations=Y)
#sampler.init_parameters_from_k_means(observations=Y, n_init=20, verbose=True)
#gibbs_parameters, gibbs_time = sampler.fit_timed(iter_type='Gibbs',
# max_time=5*60,
# tqdm=tqdm, tqdm_iter=True,
# )
# Fit Gauss HMM using SGRLD
sampler = GaussHMMSampler(num_states=6, m=1, observations=Y)
sampler.init_parameters_from_k_means(observations=Y[::50], n_init=20, verbose=True)
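# (k-means initialization uses every 50th observation to stay tractable;
# the SGRLD fit below then runs on the full-resolution series)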
sampler.parameters.pi_type = 'expanded'
sgrld_parameters, sgrld_time = sampler.fit_timed(
iter_type='SGRLD',
max_time=5*60,
epsilon=0.001, subsequence_length=4, buffer_length=2,
tqdm=tqdm,
)
# Compare Fit at Init and Final On Subset
def compare_inference(parameters, sampler, Y, filtered_Y, tqdm=None):
sampler.parameters = parameters.copy()
z_prob = sampler.predict(
observations=Y,
target='latent', return_distr=True, tqdm=tqdm,
)
z_map = np.argmax(z_prob, axis=1)
fig, axes = plt.subplots(2,1, sharex=True, gridspec_kw={'height_ratios':[3,1]})
axes[0].plot(Y, '-k', label='observations')
for k in range(sampler.num_states):
x = Y.copy()
x[z_map != k] = np.nan
axes[0].plot(x, '-C{}'.format(k), label='MAP(z) = {}'.format(k))
axes[0].plot(filtered_Y, '--', color='gray', label='filtered observations')
axes[0].legend()
axes[1].plot(z_prob, '-')
axes[1].set_ylabel('Latent Prob')
return fig, axes
fig, axes = compare_inference(sgrld_parameters[0], sampler,
Y_test, filtered_Y_test, tqdm=tqdm)
fig.suptitle('Init SGRLD Fit')
fig, axes = compare_inference(sgrld_parameters[-1], sampler,
Y_test, filtered_Y_test, tqdm=tqdm)
fig.suptitle('Final SGRLD Fit')
################################################################################
# Sampler Evaluation
################################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
noisy_predictive_logjoint_loglike_metric,
)
# Evaluate Predictive Loglikelihood on Test Set
metric_functions=[
noisy_predictive_logjoint_loglike_metric(
num_steps_ahead=3, observations=Y_test, tqdm=tqdm),
]
sample_functions=sample_function_parameters(['pi', 'logit_pi', 'mu', 'R'])
# Evaluate SGRLD samples
sgrld_evaluator = OfflineEvaluator(sampler,
parameters_list=half_average_parameters_list(sgrld_parameters),
parameters_times=sgrld_time,
metric_functions = metric_functions,
sample_functions = sample_functions,
)
sgrld_evaluator.evaluate(16, tqdm=tqdm)
# Plot Traces, Metrics, and Compare
from sgmcmc_ssm.plotting_utils import (
plot_trace_plot,
plot_metrics,
compare_metrics,
)
plot_trace_plot(sgrld_evaluator)
plot_metrics(sgrld_evaluator)
plot_metrics(sgrld_evaluator, x='time')
# EOF
| 4,127 | 32.024 | 91 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/api_demos/hmm_long_demo.py | import numpy as np
from sgmcmc_ssm.models.gauss_hmm import (
generate_gausshmm_data,
GaussHMMParameters,
GaussHMMPrior,
GaussHMMPreconditioner,
GaussHMMHelper,
GaussHMMSampler,
)
np.random.seed(12345)
# Parameters
## Define Gaussian HMM Parameters
logit_pi = np.array([[2, 0], [0, 2]])*2
mu = np.array([[1, -1], [-1, 1]])
R = np.array([np.eye(2), np.eye(2)]) * 0.01
LRinv = np.array([np.linalg.cholesky(np.linalg.inv(R_k)) for R_k in R])
parameters = GaussHMMParameters(logit_pi=logit_pi, mu=mu, LRinv=LRinv)
print(parameters)
## Access elements of parameters
print(parameters.pi)
print(parameters.mu)
print(parameters.R)
## Dimension of parameters
print(parameters.dim)
## Parameters as dict or as flattened numpy vector
print(parameters.as_dict())
print(parameters.as_vector())
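## Round-trip conversion (mirroring the other *_long_demo scripts; assumes
## GaussHMMParameters exposes the same helpers):
# print(parameters.from_dict_to_vector(parameters.as_dict()))
# print(parameters.from_vector_to_dict(parameters.as_vector(), **parameters.dim))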
# Generate Data
T = 1000
data = generate_gausshmm_data(T=T, parameters=parameters)
## Synthetic Data Overview
print(data.keys())
print(data['observations'])
print(data['latent_vars'])
print(data['parameters'])
## Plot Data
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, axes = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios':[3,1]})
axes[0].plot(data['observations'][:,0], 'C0')
axes[0].plot(data['observations'][:,1], 'C1')
axes[1].plot(data['latent_vars'], '.')
# Gaussian HMM Prior
## Default Prior
prior = GaussHMMPrior.generate_default_prior(**parameters.dim, var=1)
## Access Prior Parameters
print(prior.hyperparams)
## Sample from Prior
print(prior.sample_prior())
## Evaluate Log-Prior + Grad Log-Prior
print(prior.logprior(parameters=parameters))
print(prior.grad_logprior(parameters=parameters))
# Gaussian HMM Helper
## Setup Helper
helper = GaussHMMHelper(**parameters.dim)
## Forward + Backward Message Passing
print(helper.forward_message(data['observations'], parameters))
forward_messages = helper.forward_pass(data['observations'], parameters, include_init_message=True)
backward_messages = helper.backward_pass(data['observations'], parameters, include_init_message=True)
## Evaluate Marginal Log-likelihood
print(helper.marginal_loglikelihood(data['observations'], parameters))
for f_m, b_m in zip(forward_messages, backward_messages):
print(helper.marginal_loglikelihood(np.array([]), parameters, f_m, b_m))
## Evaluate Gradient Marginal Log-likelihood
print(helper.gradient_marginal_loglikelihood(data['observations'], parameters))
## Evaluate Predictive Log-likelihood
print(helper.predictive_loglikelihood(data['observations'], parameters, lag=10))
## Gibbs Sampler Sufficient Statistic
sufficient_stat = helper.calc_gibbs_sufficient_statistic(
data['observations'], data['latent_vars'])
print(sufficient_stat)
## Sampler parameters using Gibbs
print(helper.parameters_gibbs_sample(
data['observations'], data['latent_vars'], prior
))
## Sample latent variables using Gibbs
### Default is smoothed distribution
zhat = helper.latent_var_sample(data['observations'], parameters)
print(np.sum(zhat != data['latent_vars']))
fig, axes = plt.subplots(1, 1)
axes.plot(data['latent_vars'], 'C0.', label='truth')
axes.plot(zhat+0.1, 'C1.', label='smoothed sample')
axes.legend()
from sklearn.metrics import confusion_matrix
print('Confusion Matrix:')
print(confusion_matrix(data['latent_vars'], zhat))
### Sample latent variables from filtered/predictive distribution
print(helper.latent_var_sample(data['observations'], parameters, distribution="filtered"))
print(helper.latent_var_sample(data['observations'], parameters, distribution="predictive"))
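### The three distributions condition on different data: "smoothed" uses
### y_{1:T}, "filtered" uses y_{1:t}, and "predictive" uses y_{1:(t-1)}.
### A quick comparison of disagreement with the true states (sketch; note
### the rates are only meaningful up to label permutation):
# for distr in ["smoothed", "filtered", "predictive"]:
#     z_distr = helper.latent_var_sample(
#         data['observations'], parameters, distribution=distr)
#     print(distr, np.mean(z_distr != data['latent_vars']))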
## Distribution of observations
ymean, ysd = helper.y_marginal(data['observations'], parameters, distribution="smoothed")
plt.figure()
plt.plot(data['observations'][:,0], 'C0')
plt.plot(ymean[:,0], 'C1--')
plt.fill_between(
x=np.arange(ymean.shape[0]),
y1=ymean[:,0]-2*ysd[:,0],
y2=ymean[:,0]+2*ysd[:,0],
color='C1', alpha=0.5)
# Gaussian HMM Preconditioner
preconditioner = GaussHMMPreconditioner()
parameters.pi_type = 'expanded' # Use 'expanded' pi_type for preconditioning
grad = helper.gradient_marginal_loglikelihood(data['observations'], parameters)
## Precondition Gradient
print(grad)
print(preconditioner.precondition(grad, parameters))
## Preconditioned Noise + Correction term
print(preconditioner.precondition_noise(parameters))
print(preconditioner.correction_term(parameters))
# Gaussian HMM Sampler
## Setup Sampler
sampler = GaussHMMSampler(**parameters.dim)
sampler.setup(data['observations'], prior, parameters.copy())
## Evaluate Log Joint
print(sampler.exact_logjoint(return_loglike=True))
## Evaluate Gradient
### Default uses full sequence
grad = sampler.noisy_gradient()
print(grad)
### Example with subsequence
print(sampler.noisy_gradient(subsequence_length=10, buffer_length=5, minibatch_size=10))
## Preconditioned Gradient
precond_grad = sampler.noisy_gradient(preconditioner=preconditioner)
print(precond_grad)
### Example with subsequence
print(sampler.noisy_gradient(
preconditioner=preconditioner,
subsequence_length=10, buffer_length=5, minibatch_size=10))
## Example Gibbs Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_gibbs().project_parameters())
## Example SGD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_sgd(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example ADAGRAD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_adagrad(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example SGLD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgld(epsilon=0.1).project_parameters())
## Example SGRLD Step
sampler.parameters = sampler.prior.sample_prior()
sampler.parameters.pi_type = 'expanded' # Use 'expanded' parameterization for preconditioning
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgrld(epsilon=0.1, preconditioner=preconditioner).project_parameters())
## Using Evaluator
from sgmcmc_ssm import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
best_permutation_metric_function_parameter,
best_double_permutation_metric_function_parameter,
)
metric_functions = [
noisy_logjoint_loglike_metric(),
best_double_permutation_metric_function_parameter(
parameter_name = 'pi',
target_value = parameters.pi,
metric_name = 'mse',
best_function = min
),
best_permutation_metric_function_parameter(
parameter_name = 'mu',
target_value = parameters.mu,
metric_name = 'mse',
best_function = min
),
best_permutation_metric_function_parameter(
parameter_name = 'R',
target_value = parameters.R,
metric_name = 'mse',
best_function = min
),
]
sample_functions = sample_function_parameters(
['logit_pi', 'expanded_pi', 'pi', 'mu', 'R', 'LRinv'],
)
sampler = GaussHMMSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
sampler=sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few Gibbs Sampler steps
for _ in range(10):
evaluator.evaluate_sampler_step(['sample_gibbs', 'project_parameters'])
print(evaluator.metrics)
print(evaluator.samples)
## Run a few ADA_GRAD sampler steps
for _ in range(10):
evaluator.evaluate_sampler_step(
['step_adagrad', 'project_parameters'],
[dict(epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few SGRLD Steps
evaluator.sampler.parameters.pi_type = 'expanded'
for _ in range(100):
evaluator.evaluate_sampler_step(
['sample_sgrld', 'project_parameters'],
[dict(preconditioner=preconditioner,
epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator)
plot_trace_plot(evaluator)
| 8,623 | 30.246377 | 101 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/api_demos/slds_long_demo.py | import numpy as np
from sgmcmc_ssm.models.slds import (
generate_slds_data,
SLDSParameters,
SLDSPrior,
SLDSPreconditioner,
SLDSHelper,
SLDSSampler,
)
np.random.seed(12345)
# Parameters
## Define SLDS Parameters
alpha = 0.05
delta = np.pi
b = np.pi/2
pi = np.array([
[1-alpha, alpha],
[alpha, 1-alpha]])
rot_mat = lambda rho: np.array([[np.cos(rho), -np.sin(rho)], [np.sin(rho), np.cos(rho)]])
A = np.array([rot_mat(-delta/2+b), rot_mat(delta/2+b)]) * 0.9
Q = np.array([np.eye(2), np.eye(2)*2]) * 0.1
C = np.eye(2)
R = np.eye(2)*0.1
logit_pi = np.log(pi+0.000001)
LQinv = np.array([np.linalg.cholesky(np.linalg.inv(Q_k)) for Q_k in Q])
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = SLDSParameters(logit_pi=logit_pi, A=A, C=C, LQinv=LQinv, LRinv=LRinv)
print(parameters)
## Access elements of parameters
print(parameters.pi)
print(parameters.A)
print(parameters.Q)
print(parameters.C)
print(parameters.R)
## Dimension of parameters
print(parameters.dim)
## Parameters as dict or as flattened numpy vector
print(parameters.as_dict())
print(parameters.as_vector())
print(parameters.from_dict_to_vector(parameters.as_dict()))
print(parameters.from_vector_to_dict(parameters.as_vector(), **parameters.dim))
# Generate Data
T = 1000
data = generate_slds_data(T=T, parameters=parameters)
## Synthetic Data Overview
print(data.keys())
print(data['observations'])
print(data['latent_vars'].keys())
print(data['latent_vars']['x'])
print(data['latent_vars']['z'])
print(data['parameters'])
## Plot Data
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, axes = plt.subplots(3, 2, sharex=True,
gridspec_kw={'height_ratios':[6,2,2]})
axes[0,0].plot(data['observations'][:,0], '-C0', label='observation')
axes[0,0].plot(data['latent_vars']['x'][:,0], '--C1', label='latent')
axes[0,1].plot(data['observations'][:,1], '-C0', label='observation')
axes[0,1].plot(data['latent_vars']['x'][:,1], '--C1', label='latent')
axes[0,1].legend()
axes[1,0].plot(data['observations'][:,0] - data['latent_vars']['x'][:,0], '-C2', label='residual')
axes[1,1].plot(data['observations'][:,1] - data['latent_vars']['x'][:,1], '-C2', label='residual')
axes[1,1].legend()
axes[2,0].plot(data['latent_vars']['z'], '.')
axes[2,1].plot(data['latent_vars']['z'], '.')
# SLDS Prior
## Default Prior
prior = SLDSPrior.generate_default_prior(**parameters.dim, var=1)
## Access Prior Parameters
print(prior.hyperparams)
## Sample from Prior
print(prior.sample_prior())
## Evaluate Log-Prior + Grad Log-Prior
print(prior.logprior(parameters=parameters))
print(prior.grad_logprior(parameters=parameters))
# SLDS Helper
## Setup Helper
helper = SLDSHelper(**parameters.dim)
z = data['latent_vars']['z']
x = data['latent_vars']['x']
## Forward + Backward Message Passing Conditional on Z
print(helper.forward_message(data['observations'], parameters, z=z))
forward_messages = helper.forward_pass(data['observations'], parameters, z=z,
include_init_message=True)
backward_messages = helper.backward_pass(data['observations'], parameters, z=z,
include_init_message=True)
print(helper.marginal_loglikelihood(data['observations'], parameters, z=z))
for f_m, b_m in zip(forward_messages, backward_messages):
print(helper.marginal_loglikelihood(np.array([]), parameters, f_m, b_m,
z=[]))
## Forward + Backward Message Passing Conditional on X
print(helper.forward_message(data['observations'], parameters, x=x))
forward_messages = helper.forward_pass(data['observations'], parameters, x=x,
include_init_message=True)
backward_messages = helper.backward_pass(data['observations'], parameters, x=x,
include_init_message=True)
print(helper.marginal_loglikelihood(data['observations'], parameters, x=x))
for f_m, b_m in zip(forward_messages, backward_messages):
print(helper.marginal_loglikelihood(np.array([]), parameters, f_m, b_m,
x=[]))
## Complete Data Loglikelihood
print(helper.marginal_loglikelihood(data['observations'], parameters, z=z, x=x))
## Evaluate Gradient Log-likelihood
print(helper.gradient_marginal_loglikelihood(data['observations'], parameters,
z=z))
print(helper.gradient_marginal_loglikelihood(data['observations'], parameters,
x=x))
print(helper.gradient_marginal_loglikelihood(data['observations'], parameters,
x=x, z=z))
## Evaluate Predictive Log-likelihood
print(helper._x_predictive_loglikelihood(data['observations'],
z=z, parameters=parameters, lag=10))
print(helper._z_predictive_loglikelihood(data['observations'],
x=x, parameters=parameters, lag=10))
## Gibbs Sampler Sufficient Statistic
sufficient_stat = helper.calc_gibbs_sufficient_statistic(
data['observations'], data['latent_vars'])
print(sufficient_stat)
## Sampler parameters using Gibbs
print(helper.parameters_gibbs_sample(
data['observations'], data['latent_vars'], prior
))
## Sample latent variables using Gibbs
xhat = helper._x_latent_var_sample(data['observations'], z, parameters)
xhat = helper._x_latent_var_sample(data['observations'], z, parameters, distribution='filtered')
xhat = helper._x_latent_var_sample(data['observations'], z, parameters, distribution='predictive')
zhat = helper._z_latent_var_sample(data['observations'], x, parameters)
zhat = helper._z_latent_var_sample(data['observations'], x, parameters, distribution='filtered')
zhat = helper._z_latent_var_sample(data['observations'], x, parameters, distribution='predictive')
### Plots for X
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].plot(data['latent_vars']['x'][:,0], 'C0', label='truth')
axes[0].plot(xhat[:,0], ':C1', label='inferred')
axes[1].plot(data['latent_vars']['x'][:,1], 'C0', label='truth')
axes[1].plot(xhat[:,1], ':C1', label='inferred')
axes[1].legend()
### Plots for Z
fig, axes = plt.subplots(1, 1)
axes.plot(data['latent_vars']['z'], 'C0.', label='truth')
axes.plot(zhat+0.1, 'C1.', label='smoothed sample')
axes.legend()
# SLDS Preconditioner
preconditioner = SLDSPreconditioner()
parameters.pi_type = 'expanded' # Use 'expanded' pi_type for preconditioning
grad = helper.gradient_marginal_loglikelihood(data['observations'], parameters, z=z, x=x)
## Precondition Gradient
print(grad)
print(preconditioner.precondition(grad, parameters))
## Preconditioned Noise + Correction term
print(preconditioner.precondition_noise(parameters))
print(preconditioner.correction_term(parameters))
# SLDS Sampler
## Setup Sampler
sampler = SLDSSampler(**parameters.dim)
sampler.setup(data['observations'], prior, parameters.copy())
sampler.init_sample_latent()
## Evaluate Log Joint
## (exact_logjoint is left commented out -- presumably because exact SLDS
## marginalization sums over all K^T discrete state paths, which is
## intractable at T=1000; the noisy subsequence estimates below are used
## instead)
#print(sampler.exact_logjoint(return_loglike=True))
#print(sampler.noisy_logjoint(return_loglike=True, subsequence_length=-1))
print(sampler.noisy_logjoint(return_loglike=True, subsequence_length=50))
print(sampler.noisy_logjoint(return_loglike=True, subsequence_length=10, minibatch_size=5))
## Evaluate Gradient
### Default uses full sequence
grad = sampler.noisy_gradient()
print(grad)
### Example with subsequence
print(sampler.noisy_gradient(kind='complete', subsequence_length=10, buffer_length=5, minibatch_size=10))
print(sampler.noisy_gradient(kind='x_marginal', subsequence_length=10, buffer_length=5, minibatch_size=10))
print(sampler.noisy_gradient(kind='z_marginal', subsequence_length=10, buffer_length=5, minibatch_size=10))
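### (Interpretation of 'kind', inferred from the helper calls earlier in
### this script: 'complete' conditions on both sampled x and z,
### 'x_marginal' integrates x out given z via Kalman recursions, and
### 'z_marginal' integrates z out given x via HMM message passing.)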
## Preconditioned Gradient
precond_grad = sampler.noisy_gradient(preconditioner=preconditioner)
print(precond_grad)
### Example with subsequence
print(sampler.noisy_gradient(
preconditioner=preconditioner,
subsequence_length=10, buffer_length=5, minibatch_size=10))
## Example Gibbs Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_gibbs().project_parameters())
## Example SGD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_sgd(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example ADAGRAD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_adagrad(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example SGLD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgld(epsilon=0.1).project_parameters())
## Example SGRLD Step
sampler.parameters = sampler.prior.sample_prior()
sampler.parameters.pi_type = 'expanded'
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgrld(epsilon=0.1, preconditioner=preconditioner).project_parameters())
## Using Evaluator
from tqdm import tqdm
from sgmcmc_ssm import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
metric_function_parameters,
best_permutation_metric_function_parameter,
best_double_permutation_metric_function_parameter,
)
metric_functions = [
noisy_logjoint_loglike_metric(),
best_double_permutation_metric_function_parameter(
parameter_name = 'pi',
target_value = parameters.pi,
metric_name = 'mse',
best_function = min
),
best_permutation_metric_function_parameter(
parameter_name = 'A',
target_value = parameters.A,
metric_name = 'mse',
best_function = min
),
best_permutation_metric_function_parameter(
parameter_name = 'Q',
target_value = parameters.Q,
metric_name = 'mse',
best_function = min
),
metric_function_parameters(
parameter_names=['C', 'R'],
target_values=[parameters.C, parameters.R],
metric_names = ['mse', 'mse'],
)
]
sample_functions = sample_function_parameters(
['pi', 'A', 'Q', 'C', 'R'],
)
sampler = SLDSSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
sampler.init_sample_latent() ## IMPORTANT: latent (x, z) must be initialized before metrics or sampler steps can be evaluated
evaluator = SamplerEvaluator(
sampler=sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few Gibbs Sampler steps
for _ in tqdm(range(10)):
evaluator.evaluate_sampler_step(['sample_gibbs', 'project_parameters'])
print(evaluator.metrics)
print(evaluator.samples)
## Run a few ADA_GRAD sampler steps
for _ in tqdm(range(10)):
evaluator.evaluate_sampler_step(
['step_adagrad', 'project_parameters'],
[dict(epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few SGRLD Steps
evaluator.sampler.parameters.pi_type = 'expanded'
for _ in tqdm(range(10)):
evaluator.evaluate_sampler_step(
['sample_sgrld', 'project_parameters'],
[dict(preconditioner=preconditioner,
epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator)
plot_trace_plot(evaluator, single_variables=['C', 'LRinv', 'R', 'Rinv'])
| 11,414 | 31.991329 | 107 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/api_demos/arhmm_long_demo.py | import numpy as np
from sgmcmc_ssm.models.arphmm import (
generate_arphmm_data,
ARPHMMParameters,
ARPHMMPrior,
ARPHMMPreconditioner,
ARPHMMHelper,
ARPHMMSampler,
)
np.random.seed(12345)
# Parameters
## Define ARPHMM Parameters
logit_pi = np.array([[2, 0], [0, 2]])*2
D = np.array([np.eye(2), -np.eye(2)]) * 0.9
R = np.array([np.eye(2), np.eye(2)]) * 0.01
LRinv = np.array([np.linalg.cholesky(np.linalg.inv(R_k)) for R_k in R])
parameters = ARPHMMParameters(logit_pi=logit_pi, D=D, LRinv=LRinv)
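# (State 0 uses AR matrix 0.9*I, giving smooth persistence, while state 1
# uses -0.9*I, which roughly flips the sign of the observation each step;
# this makes the two regimes easy to tell apart in the plot below.)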
print(parameters)
## Access elements of parameters
print(parameters.pi)
print(parameters.D)
print(parameters.R)
## Dimension of parameters
print(parameters.dim)
## Parameters as dict or as flattened numpy vector
print(parameters.as_dict())
print(parameters.as_vector())
print(parameters.from_dict_to_vector(parameters.as_dict()))
print(parameters.from_vector_to_dict(parameters.as_vector(), **parameters.dim))
# Generate Data
T = 1000
data = generate_arphmm_data(T=T, parameters=parameters)
## Synthetic Data Overview
print(data.keys())
print(data['observations'])
print(data['latent_vars'])
print(data['parameters'])
## Plot Data
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, axes = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios':[3,1]})
axes[0].plot(data['observations'][:,0,0], 'C0')
axes[0].plot(data['observations'][:,0,1], 'C1')
axes[1].plot(data['latent_vars'], '.')
# ARPHMM Prior
## Default Prior
prior = ARPHMMPrior.generate_default_prior(**parameters.dim, var=1)
## Access Prior Parameters
print(prior.hyperparams)
## Sample from Prior
print(prior.sample_prior())
## Evaluate Log-Prior + Grad Log-Prior
print(prior.logprior(parameters=parameters))
print(prior.grad_logprior(parameters=parameters))
# ARPHMM Helper
## Setup Helper
helper = ARPHMMHelper(**parameters.dim)
## Forward + Backward Message Passing
print(helper.forward_message(data['observations'], parameters))
forward_messages = helper.forward_pass(data['observations'], parameters, include_init_message=True)
backward_messages = helper.backward_pass(data['observations'], parameters, include_init_message=True)
## Evaluate Marginal Log-likelihood
print(helper.marginal_loglikelihood(data['observations'], parameters))
for f_m, b_m in zip(forward_messages, backward_messages):
print(helper.marginal_loglikelihood(np.array([]), parameters, f_m, b_m))
## Evaluate Gradient Marginal Log-likelihood
print(helper.gradient_marginal_loglikelihood(data['observations'], parameters))
## Evaluate Predictive Log-likelihood
print(helper.predictive_loglikelihood(data['observations'], parameters, lag=10))
## Gibbs Sampler Sufficient Statistic
sufficient_stat = helper.calc_gibbs_sufficient_statistic(
data['observations'], data['latent_vars'])
print(sufficient_stat)
## Sampler parameters using Gibbs
print(helper.parameters_gibbs_sample(
data['observations'], data['latent_vars'], prior
))
## Sample latent variables using Gibbs
### Default is smoothed distribution
zhat = helper.latent_var_sample(data['observations'], parameters)
print(np.sum(zhat != data['latent_vars']))
fig, ax = plt.subplots(1, 1)
ax.plot(data['latent_vars'], 'C0.', label='truth')
ax.plot(zhat+0.1, 'C1.', label='smoothed sample')
for err_loc in np.where(zhat != data['latent_vars'])[0]:
ax.axvline(x=err_loc, color='red', linewidth=1, linestyle='--')
ax.legend()
ax.set_xlabel("red lines are errors")
from sklearn.metrics import confusion_matrix
print('Confusion Matrix:')
print(confusion_matrix(data['latent_vars'], zhat))
### Sample latent variables from filtered/predictive distribution
print(helper.latent_var_sample(data['observations'], parameters, distribution="filtered"))
print(helper.latent_var_sample(data['observations'], parameters, distribution="predictive"))
# ARPHMM Preconditioner
preconditioner = ARPHMMPreconditioner()
parameters.pi_type = 'expanded' # Use 'expanded' pi_type for preconditioning
grad = helper.gradient_marginal_loglikelihood(data['observations'], parameters)
## Precondition Gradient
print(grad)
print(preconditioner.precondition(grad, parameters))
## Preconditioned Noise + Correction term
print(preconditioner.precondition_noise(parameters))
print(preconditioner.correction_term(parameters))
# ARPHMM Sampler
## Setup Sampler
sampler = ARPHMMSampler(**parameters.dim)
sampler.setup(data['observations'], prior, parameters.copy())
## Evaluate Log Joint
print(sampler.exact_logjoint(return_loglike=True))
## Evaluate Gradient
### Default uses full sequence
grad = sampler.noisy_gradient()
print(grad)
### Example with subsequence
print(sampler.noisy_gradient(subsequence_length=10, buffer_length=5, minibatch_size=10))
## Preconditioned Gradient
precond_grad = sampler.noisy_gradient(preconditioner=preconditioner)
print(precond_grad)
### Example with subsequence
print(sampler.noisy_gradient(
preconditioner=preconditioner,
subsequence_length=10, buffer_length=5, minibatch_size=10))
## Example Gibbs Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_gibbs().project_parameters())
## Example SGD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_sgd(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example ADAGRAD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_adagrad(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example SGLD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgld(epsilon=0.1).project_parameters())
## Example SGRLD Step
sampler.parameters = sampler.prior.sample_prior()
sampler.parameters.pi_type = 'expanded'
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgrld(epsilon=0.1, preconditioner=preconditioner).project_parameters())
## Using Evaluator
from sgmcmc_ssm import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
best_permutation_metric_function_parameter,
best_double_permutation_metric_function_parameter,
)
metric_functions = [
noisy_logjoint_loglike_metric(),
best_double_permutation_metric_function_parameter(
parameter_name = 'pi',
target_value = parameters.pi,
metric_name = 'mse',
best_function = min
),
best_permutation_metric_function_parameter(
parameter_name = 'D',
target_value = parameters.D,
metric_name = 'mse',
best_function = min
),
best_permutation_metric_function_parameter(
parameter_name = 'R',
target_value = parameters.R,
metric_name = 'mse',
best_function = min
),
]
sample_functions = sample_function_parameters(
['logit_pi', 'expanded_pi', 'pi', 'D', 'R', 'LRinv'],
)
sampler = ARPHMMSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
sampler=sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few Gibbs Sampler steps
for _ in range(10):
evaluator.evaluate_sampler_step(['sample_gibbs', 'project_parameters'])
print(evaluator.metrics)
print(evaluator.samples)
## Run a few ADA_GRAD sampler steps
for _ in range(10):
evaluator.evaluate_sampler_step(
['step_adagrad', 'project_parameters'],
[dict(epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few SGRLD Steps
evaluator.sampler.parameters.pi_type = 'expanded'
for _ in range(10):
evaluator.evaluate_sampler_step(
['sample_sgrld', 'project_parameters'],
[dict(preconditioner=preconditioner,
epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator)
plot_trace_plot(evaluator)
| 8,407 | 30.02583 | 101 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/api_demos/lgssm_pf_demo.py | import numpy as np
from sgmcmc_ssm.models.lgssm import (
generate_lgssm_data,
LGSSMParameters,
LGSSMPrior,
LGSSMPreconditioner,
LGSSMHelper,
LGSSMSampler,
)
from tqdm import tqdm
np.random.seed(12345)
# Parameters
## Define LGSSM Parameters
A = np.eye(1)*0.95
C = np.eye(1)*1.0
Q = np.eye(1)*1.0
R = np.eye(1)*1.0
parameters = LGSSMParameters(A=A, C=C, Q=Q, R=R)
print(parameters)
## Access elements of parameters
print(parameters.A)
print(parameters.Q)
print(parameters.R)
## Dimension of parameters
print(parameters.dim)
## Parameters as dict or as flattened numpy vector
print(parameters.as_dict())
print(parameters.as_vector())
print(parameters.from_dict_to_vector(parameters.as_dict()))
print(parameters.from_vector_to_dict(parameters.as_vector(), **parameters.dim))
# Generate Data
T = 200
data = generate_lgssm_data(T=T, parameters=parameters)
## Synthetic Data Overview
print(data.keys())
print(data['observations'])
print(data['latent_vars'])
print(data['parameters'])
## Plot Data
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, ax = plt.subplots(1, 1)
ax.plot(data['observations'][:,0], '-C0', label='observation')
ax.plot(data['latent_vars'][:,0], '--C1', label='latent')
ax.legend()
ax.set_title("Params A={0}, Q={1}, R={2}".format(A[0,0], Q[0,0], R[0,0]))
# LGSSM Prior
## Default Prior
prior = LGSSMPrior.generate_default_prior(**parameters.dim, var=1)
## Access Prior Parameters
print(prior.hyperparams)
## Sample from Prior
print(prior.sample_prior())
## Evaluate Log-Prior + Grad Log-Prior
print(prior.logprior(parameters=parameters))
print(prior.grad_logprior(parameters=parameters))
# LGSSM Helper
## Setup Helper
helper = LGSSMHelper(**parameters.dim)
## Evaluate Marginal Log-likelihood using different particle filters
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=100, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=1000, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=10000, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
pf='paris', N=100, tqdm=tqdm))
## Evaluate Predictive Log-likelihood
print(helper.pf_predictive_loglikelihood_estimate(
data['observations'], parameters, num_steps_ahead=10,
N=1000, tqdm=tqdm,))
## Evaluate Gradient (Score)
print(helper.pf_gradient_estimate(data['observations'], parameters,
N=1000, tqdm=tqdm,))
print(helper.pf_gradient_estimate(data['observations'], parameters,
N=10000, tqdm=tqdm,))
print(helper.pf_gradient_estimate(data['observations'], parameters,
pf='paris', N=1000, tqdm=tqdm,))
## Estimate Latent Mean and Covariance
def compare_smoothed_pfs(list_of_kwargs):
means_covs = {}
for kwargs in list_of_kwargs:
name = '{0} {1}'.format(kwargs.get('pf','Poyiadjis O(N)'),
kwargs.get('N'))
x_mean, x_cov = helper.pf_latent_var_distr(
observations=data['observations'],
parameters=parameters,
tqdm=tqdm,
**kwargs
)
x_mean, x_cov = x_mean[:,0], x_cov[:, 0,0]
means_covs[name] = x_mean, x_cov
fig, ax = plt.subplots(1, 1)
for ii, (name, (x_mean, x_cov)) in enumerate(means_covs.items()):
ax.plot(x_mean, '-C{0}'.format(ii), label=name)
ax.plot(x_mean+np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(x_mean-np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(data['latent_vars'], '-k', label='truth', alpha=0.8)
ax.legend()
return fig, ax
list_of_kwargs = [
dict(N = 100),
dict(N = 1000),
dict(N = 10000),
dict(pf='paris', N = 100),
dict(pf='paris', N = 1000),
#dict(pf='paris', N = 10000),
]
compare_smoothed_pfs(list_of_kwargs)
# LGSSM Sampler
## Setup Sampler
sampler = LGSSMSampler(**parameters.dim)
sampler.setup(data['observations'], prior, parameters.copy())
## Evaluate Log Joint
print(sampler.noisy_logjoint(kind='pf', return_loglike=True,
N=1000, tqdm=tqdm))
print(sampler.noisy_logjoint(kind='pf', return_loglike=True,
pf='paris', N=1000, tqdm=tqdm))
print(sampler.exact_logjoint(return_loglike=True))
## Evaluate Gradient
### Default uses full sequence
grad = sampler.noisy_gradient(kind='pf', N=1000, tqdm=tqdm)
print(grad)
grad = sampler.noisy_gradient(kind='pf', pf='paris', N=1000, tqdm=tqdm)
print(grad)
grad = sampler.noisy_gradient(kind='marginal', tqdm=tqdm)
print(grad)
# Note: the naive PF ('Poyiadjis O(N)') score estimate is poor for long
# sequences -- its variance grows roughly quadratically in T -- whereas
# the PaRIS variant trades extra computation for linear-in-T growth
### Example with subsequence
print(sampler.noisy_gradient(kind='pf', N=1000,
subsequence_length=10, buffer_length=5, minibatch_size=10))
## Example SGD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_sgd(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example ADAGRAD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_adagrad(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example SGLD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgld(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Using Evaluator
from sgmcmc_ssm import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
metric_function_parameters,
)
metric_functions = [
noisy_logjoint_loglike_metric(kind='pf', N=1000),
metric_function_parameters(
parameter_names=['A', 'LQinv', 'LRinv'],
target_values=[parameters.A, parameters.LQinv, parameters.LRinv],
metric_names = ['mse', 'mse', 'mse'],
)
]
sample_functions = sample_function_parameters(
['A', 'Q', 'LQinv', 'R', 'LRinv'],
)
sampler = LGSSMSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
sampler=sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few ADA_GRAD sampler steps
for _ in tqdm(range(100)):
evaluator.evaluate_sampler_step(
['step_adagrad', 'project_parameters'],
[dict(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator)
plot_trace_plot(evaluator)
### Run a few SGLD Steps
#for _ in range(10):
# evaluator.evaluate_sampler_step(
# ['sample_sgld', 'project_parameters'],
# [dict(kind='pf', N=1000,
# epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
# )
#print(evaluator.metrics)
#print(evaluator.samples)
| 7,269 | 28.673469 | 81 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/api_demos/garch_long_demo.py | import numpy as np
from sgmcmc_ssm.models.garch import (
generate_garch_data,
GARCHParameters,
GARCHPrior,
GARCHHelper,
GARCHSampler,
)
from tqdm import tqdm
np.random.seed(12345)
# Parameters
## Define GARCH Parameters
alpha = 0.1
beta = 0.8
gamma = 0.05
tau = 0.3
log_mu, logit_phi, logit_lambduh = \
GARCHParameters.convert_alpha_beta_gamma(alpha, beta, gamma)
LRinv = np.array([[tau**-1]])
parameters = GARCHParameters(
log_mu=log_mu,
logit_phi=logit_phi,
logit_lambduh=logit_lambduh,
LRinv=LRinv,
)
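# (alpha, beta, gamma) are the natural GARCH(1,1) parameters; alpha +
# beta = 0.9 < 1 satisfies the usual stationarity condition, and
# convert_alpha_beta_gamma appears to map them to the unconstrained
# log/logit parameterization (log_mu, logit_phi, logit_lambduh) used
# internally.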
print(parameters)
## Access elements of parameters
print(parameters.alpha)
print(parameters.beta)
print(parameters.gamma)
print(parameters.tau)
## Dimension of parameters
print(parameters.dim)
## Parameters as dict or as flattened numpy vector
print(parameters.as_dict())
print(parameters.as_vector())
print(parameters.from_dict_to_vector(parameters.as_dict()))
print(parameters.from_vector_to_dict(parameters.as_vector(), **parameters.dim))
# Generate Data
T = 200
data = generate_garch_data(T=T, parameters=parameters)
## Synthetic Data Overview
print(data.keys())
print(data['observations'])
print(data['latent_vars'])
print(data['parameters'])
## Plot Data
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, axes = plt.subplots(3, 1, sharex=True)
axes[0].plot(data['observations'], '.C0', label='data')
axes[0].plot(data['latent_vars'], '-C1', label='latent_var')
axes[0].set_ylabel('raw observations')
axes[1].plot(data['observations']**2, '.C0', label='data^2')
axes[1].plot(data['latent_vars']**2, '-C1', label='latent_var^2')
axes[1].set_ylabel('observations^2')
axes[1].legend()
axes[2].plot(np.log10(data['observations']**2), '.C0', label='log10(data^2)')
axes[2].plot(np.log10(data['latent_vars']**2), '-C1', label='log10(latent_var^2)')
axes[2].set_ylabel('log10(observations^2)')
axes[2].legend()
fig.suptitle("{0}".format(str(parameters)))
# GARCH Prior
## Default Prior
prior = GARCHPrior.generate_default_prior(**parameters.dim, var=1)
## Access Prior Parameters
print(prior.hyperparams)
## Sample from Prior
print(prior.sample_prior())
## Evaluate Log-Prior + Grad Log-Prior
print(prior.logprior(parameters=parameters))
print(prior.grad_logprior(parameters=parameters))
# GARCH Helper
## Setup Helper
helper = GARCHHelper(**parameters.dim)
## Evaluate Marginal Log-likelihood using different particle filters
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=100, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=1000, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=10000, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
pf='paris', N=100, tqdm=tqdm))
## Evaluate Predictive Log-likelihood
print(helper.pf_predictive_loglikelihood_estimate(
data['observations'], parameters, num_steps_ahead=10,
N=1000, tqdm=tqdm,))
## Evaluate Gradient (Score)
print(helper.pf_gradient_estimate(data['observations'], parameters,
N=1000, tqdm=tqdm,))
print(helper.pf_gradient_estimate(data['observations'], parameters,
N=10000, tqdm=tqdm,))
print(helper.pf_gradient_estimate(data['observations'], parameters,
pf='paris', N=1000, tqdm=tqdm,))
## Estimate Latent Mean and Covariance
def compare_smoothed_pfs(list_of_kwargs, squared=False):
means_covs = {}
for kwargs in list_of_kwargs:
name = '{0} {1}'.format(kwargs.get('pf','Poyiadjis O(N)'),
kwargs.get('N'))
x_mean, x_cov = helper.pf_latent_var_distr(
observations=data['observations'],
parameters=parameters,
tqdm=tqdm,
squared=squared,
**kwargs
)
x_mean, x_cov = x_mean[:,0], x_cov[:, 0,0]
means_covs[name] = x_mean, x_cov
fig, ax = plt.subplots(1, 1)
for ii, (name, (x_mean, x_cov)) in enumerate(means_covs.items()):
ax.plot(x_mean, '-C{0}'.format(ii), label=name)
ax.plot(x_mean+np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(x_mean-np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
if squared:
ax.plot(data['latent_vars']**2, '-k', label='truth', alpha=0.8)
ax.set_ylabel('observations squared')
else:
ax.plot(data['latent_vars'], '-k', label='truth', alpha=0.8)
ax.set_ylabel('observations')
ax.legend()
return fig, ax
list_of_kwargs = [
dict(N = 100),
dict(N = 1000),
dict(N = 10000),
dict(pf='paris', N = 100),
dict(pf='paris', N = 1000),
#dict(pf='paris', N = 10000),
]
compare_smoothed_pfs(list_of_kwargs)
compare_smoothed_pfs(list_of_kwargs, squared=True)
# GARCH Sampler
## Setup Sampler
sampler = GARCHSampler(**parameters.dim)
sampler.setup(data['observations'], prior, parameters.copy())
## Evaluate Log Joint
print(sampler.noisy_logjoint(kind='pf', return_loglike=True,
N=1000, tqdm=tqdm))
print(sampler.noisy_logjoint(kind='pf', return_loglike=True,
pf='paris', N=1000, tqdm=tqdm))
# Note "exact_logjoint" will throw an error
#print(sampler.exact_logjoint(return_loglike=True))
## Evaluate Gradient
### Default uses full sequence
grad = sampler.noisy_gradient(kind='pf', N=1000, tqdm=tqdm)
print(grad)
### Example with subsequence
print(sampler.noisy_gradient(kind='pf', N=1000,
subsequence_length=10, buffer_length=5, minibatch_size=10))
## Example SGD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_sgd(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example ADAGRAD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_adagrad(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example SGLD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgld(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Using Evaluator
from sgmcmc_ssm import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
metric_function_parameters,
)
metric_functions = [
noisy_logjoint_loglike_metric(kind='pf', N=1000),
metric_function_parameters(
parameter_names=['alpha', 'beta', 'gamma', 'tau'],
target_values=[parameters.alpha, parameters.beta,
parameters.gamma, parameters.tau],
metric_names = ['mse', 'mse', 'mse', 'mse'],
)
]
sample_functions = sample_function_parameters(
['alpha', 'beta', 'gamma', 'tau'],
)
sampler = GARCHSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
sampler=sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few ADAGRAD sampler steps
for _ in tqdm(range(100)):
evaluator.evaluate_sampler_step(
['step_adagrad', 'project_parameters'],
[dict(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator)
plot_trace_plot(evaluator)
### Run a few SGLD Steps
#for _ in range(10):
# evaluator.evaluate_sampler_step(
# ['sample_sgld', 'project_parameters'],
# [dict(kind='pf', N=1000,
# epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
# )
#print(evaluator.metrics)
#print(evaluator.samples)
| 8,081 | 29.383459 | 82 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/api_demos/svm_long_demo.py | import numpy as np
from sgmcmc_ssm.models.svm import (
generate_svm_data,
SVMParameters,
SVMPrior,
SVMHelper,
SVMSampler,
)
from tqdm import tqdm
np.random.seed(12345)
# Parameters
## Define SVM Parameters
A = np.eye(1)*0.95
Q = np.eye(1)*1.0
R = np.eye(1)*1.0
LQinv = np.linalg.cholesky(np.linalg.inv(Q))
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = SVMParameters(A=A, LQinv=LQinv, LRinv=LRinv)
print(parameters)
## Access elements of parameters
print(parameters.A)
print(parameters.Q)
print(parameters.R)
## Dimension of parameters
print(parameters.dim)
## Parameters as dict or as flattened numpy vector
print(parameters.as_dict())
print(parameters.as_vector())
print(parameters.from_dict_to_vector(parameters.as_dict()))
print(parameters.from_vector_to_dict(parameters.as_vector(), **parameters.dim))
# Generate Data
T = 200
data = generate_svm_data(T=T, parameters=parameters)
## Synthetic Data Overview
print(data.keys())
print(data['observations'])
print(data['latent_vars'])
print(data['parameters'])
## Plot Data
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, axes = plt.subplots(3, 1, sharex=True)
axes[0].plot(data['observations'], '.C0', label='data')
axes[0].set_ylabel('raw observations')
axes[1].plot(data['observations']**2, '.C0', label='data^2')
axes[1].plot(parameters.R*np.exp(data['latent_vars']), '-C1', label='R*exp(latent_var)')
axes[1].set_ylabel('observations^2')
axes[1].legend()
axes[2].plot(np.log(data['observations']**2)-np.log(parameters.R), '.C0', label='log(data^2) - log(R)')
axes[2].plot(data['latent_vars'], '-C1', label='latent_var')
axes[2].set_ylabel('log(observations^2)')
axes[2].legend()
fig.suptitle("Params A={0}, Q={1}, R={2}".format(A[0,0], Q[0,0], R[0,0]))
# SVM Prior
## Default Prior
prior = SVMPrior.generate_default_prior(**parameters.dim, var=1)
## Access Prior Parameters
print(prior.hyperparams)
## Sample from Prior
print(prior.sample_prior())
## Evaluate Log-Prior + Grad Log-Prior
print(prior.logprior(parameters=parameters))
print(prior.grad_logprior(parameters=parameters))
# SVM Helper
## Setup Helper
helper = SVMHelper(**parameters.dim)
prior_var = helper.default_forward_message['precision'][0,0]**-1
prior_mean = helper.default_forward_message['mean_precision'][0] * prior_var
## Evaluate Marginal Log-likelihood using different particle filters
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=100, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=1000, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
N=10000, tqdm=tqdm,))
print(helper.pf_loglikelihood_estimate(data['observations'], parameters,
pf='paris', N=100, tqdm=tqdm))
## Evaluate Predictive Log-likelihood
print(helper.pf_predictive_loglikelihood_estimate(
data['observations'], parameters, num_steps_ahead=10,
N=1000, tqdm=tqdm,))
## Evaluate Gradient (Score)
print(helper.pf_gradient_estimate(data['observations'], parameters,
N=1000, tqdm=tqdm,))
print(helper.pf_gradient_estimate(data['observations'], parameters,
N=10000, tqdm=tqdm,))
print(helper.pf_gradient_estimate(data['observations'], parameters,
pf='paris', N=1000, tqdm=tqdm,))
## Estimate Latent Mean and Covariance
def compare_smoothed_pfs(list_of_kwargs):
means_covs = {}
for kwargs in list_of_kwargs:
name = '{0} {1}'.format(kwargs.get('pf','Poyiadjis O(N)'),
kwargs.get('N'))
x_mean, x_cov = helper.pf_latent_var_distr(
observations=data['observations'],
parameters=parameters,
tqdm=tqdm,
**kwargs
)
x_mean, x_cov = x_mean[:,0], x_cov[:, 0,0]
means_covs[name] = x_mean, x_cov
fig, ax = plt.subplots(1, 1)
for ii, (name, (x_mean, x_cov)) in enumerate(means_covs.items()):
ax.plot(x_mean, '-C{0}'.format(ii), label=name)
ax.plot(x_mean+np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(x_mean-np.sqrt(x_cov), "--C{}".format(ii), alpha=0.5)
ax.plot(data['latent_vars'], '-k', label='truth', alpha=0.8)
ax.legend()
return fig, ax
list_of_kwargs = [
dict(N = 100),
dict(N = 1000),
dict(N = 10000),
dict(pf='paris', N = 100),
dict(pf='paris', N = 1000),
#dict(pf='paris', N = 10000),
]
compare_smoothed_pfs(list_of_kwargs)
# SVM Sampler
## Setup Sampler
sampler = SVMSampler(**parameters.dim)
sampler.setup(data['observations'], prior, parameters.copy())
## Evaluate Log Joint
print(sampler.noisy_logjoint(kind='pf', return_loglike=True,
N=1000, tqdm=tqdm))
print(sampler.noisy_logjoint(kind='pf', return_loglike=True,
pf='paris', N=1000, tqdm=tqdm))
# Note "exact_logjoint" will throw an error
#print(sampler.exact_logjoint(return_loglike=True))
## Evaluate Gradient
### Default uses full sequence
grad = sampler.noisy_gradient(kind='pf', N=1000, tqdm=tqdm)
print(grad)
### Example with subsequence
print(sampler.noisy_gradient(kind='pf', N=1000,
subsequence_length=10, buffer_length=5, minibatch_size=10))
## Example SGD Step
sampler.parameters = sampler.prior.sample_prior().project_parameters()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_sgd(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example ADAGRAD Step
sampler.parameters = sampler.prior.sample_prior().project_parameters()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_adagrad(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example SGLD Step
sampler.parameters = sampler.prior.sample_prior().project_parameters()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgld(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Using Evaluator
from sgmcmc_ssm import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
metric_function_parameters,
)
metric_functions = [
noisy_logjoint_loglike_metric(kind='pf', N=1000),
metric_function_parameters(
parameter_names=['A', 'LQinv', 'LRinv'],
target_values=[parameters.A, parameters.LQinv, parameters.LRinv],
metric_names = ['mse', 'mse', 'mse'],
)
]
sample_functions = sample_function_parameters(
['A', 'Q', 'LQinv', 'R', 'LRinv'],
)
sampler = SVMSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
sampler=sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few ADAGRAD sampler steps
for _ in tqdm(range(100)):
evaluator.evaluate_sampler_step(
['step_adagrad', 'project_parameters'],
[dict(kind='pf', N=1000,
epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator)
plot_trace_plot(evaluator)
### Run a few SGLD Steps
#for _ in range(10):
# evaluator.evaluate_sampler_step(
# ['sample_sgld', 'project_parameters'],
# [dict(kind='pf', N=1000,
# epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
# )
#print(evaluator.metrics)
#print(evaluator.samples)
| 7,747 | 29.868526 | 103 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/demo/api_demos/lgssm_long_demo.py | import numpy as np
from sgmcmc_ssm.models.lgssm import (
generate_lgssm_data,
LGSSMParameters,
LGSSMPrior,
LGSSMPreconditioner,
LGSSMHelper,
LGSSMSampler,
)
np.random.seed(12345)
# Parameters
## Define LGSSM Parameters
A = np.eye(2)*0.9
Q = np.eye(2)*0.1
C = np.eye(2)
R = np.eye(2)*0.5
LQinv = np.linalg.cholesky(np.linalg.inv(Q))
LRinv = np.linalg.cholesky(np.linalg.inv(R))
parameters = LGSSMParameters(A=A, C=C, LQinv=LQinv, LRinv=LRinv)
print(parameters)
## Access elements of parameters
print(parameters.A)
print(parameters.Q)
print(parameters.C)
print(parameters.R)
## Dimension of parameters
print(parameters.dim)
## Parameters as dict or as flattened numpy vector
print(parameters.as_dict())
print(parameters.as_vector())
print(parameters.from_dict_to_vector(parameters.as_dict()))
print(parameters.from_vector_to_dict(parameters.as_vector(), **parameters.dim))
# Generate Data
T = 1000
data = generate_lgssm_data(T=T, parameters=parameters)
## Synthetic Data Overview
print(data.keys())
print(data['observations'])
print(data['latent_vars'])
print(data['parameters'])
## Plot Data
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, axes = plt.subplots(2, 2, sharex=True)
axes[0,0].plot(data['observations'][:,0], '-C0', label='observation')
axes[0,0].plot(data['latent_vars'][:,0], '--C1', label='latent')
axes[0,1].plot(data['observations'][:,1], '-C0', label='observation')
axes[0,1].plot(data['latent_vars'][:,1], '--C1', label='latent')
axes[0,1].legend()
axes[1,0].plot(data['observations'][:,0] - data['latent_vars'][:,0], '-C2', label='residual')
axes[1,1].plot(data['observations'][:,1] - data['latent_vars'][:,1], '-C2', label='residual')
axes[1,1].legend()
# LGSSM Prior
## Default Prior
prior = LGSSMPrior.generate_default_prior(**parameters.dim, var=1)
## Access Prior Parameters
print(prior.hyperparams)
## Sample from Prior
print(prior.sample_prior())
## Evaluate Log-Prior + Grad Log-Prior
print(prior.logprior(parameters=parameters))
print(prior.grad_logprior(parameters=parameters))
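## Sanity check (an illustrative sketch, not part of the library API):
## compare grad_logprior against a central finite difference of logprior over
## the flattened parameter vector. grad_logprior returns a dict keyed by
## variable name; num_grad below is ordered as parameters.as_vector().
eps = 1e-6
theta = parameters.as_vector()
num_grad = np.zeros_like(theta)
for ii in range(len(theta)):
    theta_plus, theta_minus = theta.copy(), theta.copy()
    theta_plus[ii] += eps
    theta_minus[ii] -= eps
    p_plus = LGSSMParameters(
        **LGSSMParameters.from_vector_to_dict(theta_plus, **parameters.dim))
    p_minus = LGSSMParameters(
        **LGSSMParameters.from_vector_to_dict(theta_minus, **parameters.dim))
    num_grad[ii] = (prior.logprior(parameters=p_plus)
                    - prior.logprior(parameters=p_minus)) / (2*eps)
print(num_grad)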
# LGSSM Helper
## Setup Helper
helper = LGSSMHelper(**parameters.dim)
## Forward + Backward Message Passing
print(helper.forward_message(data['observations'], parameters))
forward_messages = helper.forward_pass(data['observations'], parameters, include_init_message=True)
backward_messages = helper.backward_pass(data['observations'], parameters, include_init_message=True)
## Evaluate Marginal Log-likelihood
print(helper.marginal_loglikelihood(data['observations'], parameters))
for f_m, b_m in zip(forward_messages, backward_messages):
print(helper.marginal_loglikelihood(np.array([]), parameters, f_m, b_m))
## Evaluate Gradient Marginal Log-likelihood
print(helper.gradient_marginal_loglikelihood(data['observations'], parameters))
## Evaluate Predictive Log-likelihood
print(helper.predictive_loglikelihood(data['observations'], parameters, lag=10))
## Gibbs Sampler Sufficient Statistic
sufficient_stat = helper.calc_gibbs_sufficient_statistic(
data['observations'], data['latent_vars'])
print(sufficient_stat)
## Sampler parameters using Gibbs
print(helper.parameters_gibbs_sample(
data['observations'], data['latent_vars'], prior
))
## Sample latent variables using Gibbs
### Default is smoothed distribution
xhat = helper.latent_var_sample(data['observations'], parameters)
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].plot(data['latent_vars'][:,0], 'C0', label='truth')
axes[0].plot(xhat[:,0], ':C1', label='inferred')
axes[1].plot(data['latent_vars'][:,1], 'C0', label='truth')
axes[1].plot(xhat[:,1], ':C1', label='inferred')
axes[1].legend()
### Sample latent variables from filtered/predictive distribution
print(helper.latent_var_sample(data['observations'], parameters, distribution="filtered"))
print(helper.latent_var_sample(data['observations'], parameters, distribution="predictive"))
# LGSSM Preconditioner
preconditioner = LGSSMPreconditioner()
grad = helper.gradient_marginal_loglikelihood(data['observations'], parameters)
## Precondition Gradient
print(grad)
print(preconditioner.precondition(grad, parameters))
## Preconditioned Noise + Correction term
print(preconditioner.precondition_noise(parameters))
print(preconditioner.correction_term(parameters))
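## Illustrative composition (a sketch of the standard SGRLD update assembled
## from the three pieces above; sampler.sample_sgrld below is the library's
## actual implementation and may handle details differently):
##   theta <- theta + (epsilon/2)*(D*grad + Gamma) + Normal(0, epsilon*D)
epsilon = 0.1
proposed = parameters.copy()
proposed += preconditioner.precondition(grad, parameters, scale=epsilon/2)
proposed += preconditioner.correction_term(parameters, scale=epsilon/2)
proposed += preconditioner.precondition_noise(parameters, scale=epsilon)
print(proposed)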
# LGSSM Sampler
## Setup Sampler
sampler = LGSSMSampler(**parameters.dim)
sampler.setup(data['observations'], prior, parameters.copy())
## Evaluate Log Joint
print(sampler.exact_logjoint(return_loglike=True))
## Evaluate Gradient
### Default uses full sequence
grad = sampler.noisy_gradient()
print(grad)
### Example with subsequence
print(sampler.noisy_gradient(subsequence_length=10, buffer_length=5, minibatch_size=10))
## Preconditioned Gradient
precond_grad = sampler.noisy_gradient(preconditioner=preconditioner)
print(precond_grad)
### Example with subsequence
print(sampler.noisy_gradient(
preconditioner=preconditioner,
subsequence_length=10, buffer_length=5, minibatch_size=10))
## Example Gibbs Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_gibbs().project_parameters())
## Example SGD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_sgd(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example ADAGRAD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.step_adagrad(epsilon=0.1, subsequence_length=10, buffer_length=5
).project_parameters())
## Example SGLD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgld(epsilon=0.1).project_parameters())
## Example SGRLD Step
sampler.parameters = sampler.prior.sample_prior()
print(sampler.parameters)
for _ in range(5):
print(sampler.sample_sgrld(epsilon=0.1, preconditioner=preconditioner).project_parameters())
## Using Evaluator
from sgmcmc_ssm import SamplerEvaluator
from sgmcmc_ssm.metric_functions import (
sample_function_parameters,
noisy_logjoint_loglike_metric,
metric_function_parameters,
)
metric_functions = [
noisy_logjoint_loglike_metric(),
metric_function_parameters(
parameter_names=['A', 'Q', 'C', 'R'],
target_values=[parameters.A, parameters.Q,
parameters.C, parameters.R],
metric_names = ['mse', 'mse', 'mse', 'mse'],
)
]
sample_functions = sample_function_parameters(
['A', 'Q', 'LQinv', 'C', 'R', 'LRinv'],
)
sampler = LGSSMSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
sampler=sampler,
metric_functions=metric_functions,
sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few Gibbs Sampler steps
for _ in range(10):
evaluator.evaluate_sampler_step(['sample_gibbs', 'project_parameters'])
print(evaluator.metrics)
print(evaluator.samples)
## Run a few ADAGRAD sampler steps
for _ in range(10):
evaluator.evaluate_sampler_step(
['step_adagrad', 'project_parameters'],
[dict(epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
## Run a few SGRLD Steps
for _ in range(10):
evaluator.evaluate_sampler_step(
['sample_sgrld', 'project_parameters'],
[dict(preconditioner=preconditioner,
epsilon=0.1, subsequence_length=10, buffer_length=5), {}],
)
print(evaluator.metrics)
print(evaluator.samples)
from sgmcmc_ssm.plotting_utils import plot_metrics, plot_trace_plot
plot_metrics(evaluator)
plot_trace_plot(evaluator)
| 7,933 | 29.633205 | 101 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/driver_utils.py | import numpy as np
import pandas
import contextlib
import os
import joblib
import io
import time
from tqdm import tqdm
import logging # For Logs
logger = logging.getLogger(name=__name__)
def script_builder(
script_name,
python_script_path,
python_script_args,
path_to_shell_script,
project_root,
deploy_target="desktop",
script_splits=1,
conda_env_name=None,
**kwargs,
):
""" Create Shell Scripts for running experiments
Args:
script_name (string): name of script / experiment
python_script_path (path): path to python script
python_script_args (list of dicts): to pass to python script
path_to_shell_script (path): path to shell script folder
project_root (path): path to project root
deploy_target (string): desktop
script_splits (int): number of splits for desktop
conda_env_name (string): conda virtual env to activate
Details:
Saves generated scripts into `<path_to_shell_script>/script_name`
Logs are saved to `<path_to_shell_script>/logs`
"""
logger.info("Setting Up Script Files for {0}".format(script_name))
path_to_shell_script = os.path.join(path_to_shell_script, script_name)
# Create Directories
path_to_logs = os.path.join(path_to_shell_script,
"{0}_logs".format(script_name))
make_path(path_to_logs)
logger.info("Setting up {0} experiments...".format(len(python_script_args)))
if deploy_target == "desktop":
bash_file_masters = create_desktop_jobs(
list_of_args=python_script_args,
script_name=script_name,
python_script_path=python_script_path,
path_to_shell_script=path_to_shell_script,
path_to_logs=path_to_logs,
project_root=project_root,
conda_env_name=conda_env_name,
script_splits=script_splits,
)
else:
raise ValueError("Unrecognized deploy_target {0}".format(deploy_target))
return bash_file_masters
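# Usage sketch (all paths and arguments below are illustrative placeholders,
# not files that ship with this repo):
#
# bash_files = script_builder(
#     script_name='my_experiment',
#     python_script_path='./scripts/run_experiment.py',
#     python_script_args=[{'--experiment_id': 0}, {'--experiment_id': 1}],
#     path_to_shell_script='./shell_scripts/',
#     project_root='.',
#     conda_env_name='sgmcmc',
#     script_splits=2,
# )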
def create_desktop_jobs(
list_of_args,
script_name,
python_script_path,
path_to_shell_script,
path_to_logs,
project_root,
conda_env_name,
script_splits,
):
bash_file_masters=[None]*script_splits
for split in range(script_splits):
if script_splits > 1:
bash_file_master = os.path.join(path_to_shell_script,
"{0}_script_{1}.sh".format(script_name, split))
else:
bash_file_master = os.path.join(path_to_shell_script,
"{0}_script.sh".format(script_name))
# Write Shell Script
with open(bash_file_master, "w") as f:
f.write("#!/bin/bash\n")
f.write("\n")
f.write("cd " + project_root + "\n")
f.write("\n")
if conda_env_name is not None:
f.write("source activate {0}\n".format(conda_env_name))
for ii, args in enumerate(list_of_args):
if (ii >= split*len(list_of_args)/script_splits) and \
(ii < (split+1)*len(list_of_args)/script_splits):
log_name = "{0:0>3}.log".format(
args.get('--experiment_id', 0))
log_file_name = os.path.join(path_to_logs, log_name)
python_args = ops_dict_to_string_args(args)
f.write("\n")
f.write("python " + python_script_path + " " +
python_args)
f.write(" |& tee " + log_file_name + ".out\n")
os.chmod(bash_file_master, 0o755)
bash_file_masters[split] = bash_file_master
return bash_file_masters
def make_path(path):
# Helper function for making directories
if path is not None:
if not os.path.isdir(path):
if os.path.exists(path):
raise ValueError(
"path {0} is any existing file location!".format(path)
)
else:
# To avoid race conditions
wait_time = np.random.rand()*2
logging.info("Pausing for {0:2.2f} sec to make {1}".format(
wait_time, path))
time.sleep(wait_time)
if os.path.isdir(path):
return
# Make Dirs
try:
os.makedirs(path)
except OSError as e:
logger.error(e.strerror)
import errno
if e.errno == errno.EEXIST:
logger.info("Ignoring Race Condition Error")
pass
else:
raise e
return
class TqdmToLogger(io.StringIO):
"""
Output stream for TQDM which will output to logger module instead of
the StdOut.
From https://github.com/tqdm/tqdm/issues/313
"""
logger = None
level = None
buf = ''
def __init__(self,logger,level=None):
super(TqdmToLogger, self).__init__()
self.logger = logger
self.level = level or logging.INFO
def write(self,buf):
self.buf = buf.strip('\r\n\t ')
def flush(self):
self.logger.log(self.level, self.buf)
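# Usage sketch (illustrative): route a tqdm progress bar through the module
# logger instead of stdout.
#
# tqdm_out = TqdmToLogger(logger, level=logging.INFO)
# for _ in tqdm(range(10), file=tqdm_out):
#     pass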
def ops_dict_to_string_args(arg_dict, sep=" "):
string_args = ""
for k, v in arg_dict.items():
if type(v) is str:
string_args += k + sep + '"' + str(v) + '"' + sep
elif v is None:
string_args += k + sep
else:
string_args += k + sep + str(v) + sep
return(string_args)
## Context Writers
# See https://stackoverflow.com/questions/42409707/pandas-to-csv-overwriting-prevent-data-loss?rq=1 for explanation on why we use context managers
@contextlib.contextmanager
def atomic_overwrite(filename):
temp = filename + "~"
with open(temp, "w") as f:
yield f
os.rename(temp, filename)
def pandas_write_df_to_csv(df, filename, **kwargs):
""" Write DF to filename (via temp file)
Optional kwargs:
index = False
Note a better solution is to use mode='a'
"""
with atomic_overwrite(filename) as f:
df.to_csv(f, **kwargs)
return
@contextlib.contextmanager
def atomic_overwrite_binary(filename):
temp = filename + "~"
with open(temp, "wb") as f:
yield f
os.rename(temp, filename)
def joblib_write_to_file(data, filename, **kwargs):
with atomic_overwrite_binary(filename) as f:
joblib.dump(data, f, **kwargs)
return
#
| 6,683 | 31.926108 | 146 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/trace_metric_functions.py | """
Metric Functions for full parameter traces
(e.g. Kernel Stein Divergence)
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
from itertools import product
import logging
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
logger = logging.getLogger(name=__name__)
def IMQ_KSD(x, gradlogp, c=1, beta=0.5, max_block_size=1000, tqdm_out=None):
""" Uses the Inverse MultiQuadratic Kernel Stein Discrepancy (IMQ KSD)
IMQ(x,y) = (c^2 + (x-y)^T(x-y))^-beta (for c in \R, beta in (0,1))
See SteinDiscrepancy.jl (on GitHub) for full details
Args:
x (ndarray): num_points by d
gradlogp (ndarray): num_points by d
c (double): parameter of IMQ
beta (double): parameter of IMQ
Returns:
IMQ_KSD (double)
"""
c2 = c**2
if x.shape != gradlogp.shape:
raise ValueError("x and gradlogp dimensions do not match")
IMQ_KSD_sum = 0
if np.shape(x)[0] <= max_block_size:
blocks = [np.arange(np.shape(x)[0], dtype=int)]
else:
chunks = int(np.ceil(np.shape(x)[0]*1.0/max_block_size))
blocks = [np.arange(max_block_size, dtype=int) + max_block_size*chunk
for chunk in range(chunks)]
blocks[-1] = blocks[-1][blocks[-1] < np.shape(x)[0]]
block_pairs = list(product(blocks, blocks))
if len(block_pairs) == 1:
p_bar = block_pairs
else:
p_bar = tqdm(block_pairs, file=tqdm_out, mininterval=60)
for block0, block1 in p_bar:
index0_, index1_ = np.meshgrid(block0, block1)
index0 = index0_.flatten()
index1 = index1_.flatten()
x0 = x[index0]
x1 = x[index1]
gradlogp0 = gradlogp[index0]
gradlogp1 = gradlogp[index1]
dim_x = np.shape(x)[1]
diff = x0-x1
diff2 = np.sum(diff**2, axis=1)
# Calculate KSD
base = diff2 + c2
base_beta = base**-beta
base_beta1 = base_beta/base
kterm_sum = np.sum(np.sum(gradlogp0*gradlogp1, axis=1) * base_beta)
coeffgrad = -2.0 * beta * base_beta1
gradx0term_sum = np.sum(np.sum(gradlogp0*-diff, axis=1) * coeffgrad)
gradx1term_sum = np.sum(np.sum(gradlogp1*diff, axis=1) * coeffgrad)
gradx0x1term_sum = np.sum((-dim_x + 2*(beta+1)*diff2/base) * coeffgrad)
IMQ_KSD_sum += kterm_sum + gradx0term_sum + gradx1term_sum + gradx0x1term_sum
IMQ_KSD = np.sqrt(IMQ_KSD_sum)/np.shape(x)[0]
return IMQ_KSD
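# Minimal usage sketch (illustrative, not exercised by this module): for
# samples x drawn from N(0, I) and scored under the standard normal, the
# score is grad log p(x) = -x, so IMQ_KSD(x, -x) should shrink toward zero
# as the number of samples grows.
#
# x = np.random.randn(500, 2)
# print(IMQ_KSD(x, -x, c=1, beta=0.5))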
def compute_KSD(param_list, grad_list, variables=None, **kwargs):
""" kwargs are passed to IMQ_KDS
Args:
param_list (list of Parameters): list of parameters used
grad_list (list of np.ndarray): must be same size as `variables`
variables (list of string): variables of Parameters to calculate KSD
"""
    res = {}
    if variables is None:
        return res
    logger.info("Processing parameters_list for variables {0}".format(
        variables))
    logger.info("Processing grad_list for variables {0}".format(
        variables))
for ii, var in enumerate(variables):
if hasattr(param_list[0], var):
x = np.array([getattr(parameters, var).flatten()
for parameters in param_list])
gradlogp = np.array([grad[ii] for grad in grad_list])
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if len(gradlogp.shape) == 1:
gradlogp = np.reshape(gradlogp, (-1, 1))
logger.info("Calculating IMQ_KSD for {0}".format(var))
res[var] = IMQ_KSD(x, gradlogp, **kwargs)
else:
logger.warning("Did not find {0} in parameters".format(var))
return res
| 3,870 | 31.805085 | 85 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/_utils.py | """
Utilities functions
"""
import numpy as np
import pandas as pd
import logging
import scipy
import scipy.linalg.lapack
from copy import deepcopy
logger = logging.getLogger(name=__name__)
# Random Categorical
def random_categorical(pvals, size=None):
out = np.random.multinomial(n=1, pvals=pvals, size=size).dot(
np.arange(len(pvals)))
if size is None:
return int(out)
else:
return np.array(out, dtype=int)
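# Usage sketch (illustrative probabilities):
# random_categorical([0.2, 0.5, 0.3])          # e.g. 1
# random_categorical([0.2, 0.5, 0.3], size=4)  # e.g. array([1, 2, 1, 0])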
# Fixed Wishart
def array_wishart_rvs(df, scale, **kwargs):
""" Wrapper around scipy.stats.wishart to always return a np.array """
if np.size(scale) == 1:
return np.array([[
scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
]])
else:
return scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
def array_invwishart_rvs(df, scale, **kwargs):
""" Wrapper around scipy.stats.invwishart to always return a np.array """
if np.size(scale) == 1:
return np.array([[
scipy.stats.invwishart(df=df, scale=scale, **kwargs).rvs()
]])
else:
return scipy.stats.invwishart(df=df, scale=scale, **kwargs).rvs()
def array_invgamma_rvs(shape, scale, **kwargs):
""" Wrapper around scipy.stats.wishart to always return a np.array """
if np.size(scale) == 1:
return np.array([
scipy.stats.invgamma(a=shape, scale=scale, **kwargs).rvs()
])
else:
return scipy.stats.invgamma(a=shape, scale=scale, **kwargs).rvs()
# Matrix Normal LogPDF
def matrix_normal_logpdf(X, mean, Lrowprec, Lcolprec):
""" Numerical stable matrix normal logpdf
(when cholesky of precision matrices are known)
Args:
X (n by m ndarray): random variable instance
mean (n by m ndarray): mean
Lrowprec (n by n ndarray): chol of pos def row covariance (i.e. U^{-1})
Lcolprec (m by m ndarray): chol of pos def col covariance (i.e. V^{-1})
Returns:
logpdf = (-1/2*tr(V^{-1}(X-M)U^{-1}(X-M)) - nm/2*log(2pi) +
m/2*log|U^{-1}| + n/2*log|V^{-1}|)
"""
n, m = np.shape(X)
logpdf = -0.5*n*m*np.log(2*np.pi)
logpdf += -0.5*np.sum(np.dot(Lrowprec.T, np.dot(X-mean, Lcolprec))**2)
logpdf += m*np.sum(np.log(np.diag(Lrowprec)))
logpdf += n*np.sum(np.log(np.diag(Lcolprec)))
return logpdf
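# Consistency sketch (assumes scipy.stats.matrix_normal, scipy >= 0.19):
# with Lrowprec = chol(inv(U)) and Lcolprec = chol(inv(V)), the value should
# match scipy's matrix normal logpdf with rowcov=U and colcov=V.
#
# from scipy.stats import matrix_normal
# U, V = np.diag([1.0, 2.0]), np.diag([0.5, 1.0, 1.5])
# Lrowprec = np.linalg.cholesky(np.linalg.inv(U))
# Lcolprec = np.linalg.cholesky(np.linalg.inv(V))
# X, M = np.ones((2, 3)), np.zeros((2, 3))
# print(matrix_normal_logpdf(X, M, Lrowprec, Lcolprec))
# print(matrix_normal(mean=M, rowcov=U, colcov=V).logpdf(X))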
def normal_logpdf(X, mean, Lprec):
""" normal logpdf
Returns:
logpdf= -1/2*(X-mean)'*prec*(X-mean) + sum(log(Lprec)) - m/2*log(2pi)
"""
m = np.size(X)
delta = np.dot(Lprec, X-mean)
logpdf = -0.5*m*np.log(2*np.pi)
logpdf += -0.5*np.dot(delta, delta)
logpdf += np.sum(np.log(np.diag(Lprec)))
return logpdf
# Positive Definite Matrix Inverse
def pos_def_mat_inv(mat):
""" Return inverse(mat)
Uses LAPACK.DPOTRF and LAPACK.DPOTRI to compute the inverse
using cholesky decomposition
See also: https://stackoverflow.com/questions/40703042/more-efficient-way-to-invert-a-matrix-knowing-it-is-symmetric-and-positive-semi
"""
if np.isscalar(mat):
return mat ** -1
zz, info = scipy.linalg.lapack.dpotrf(mat, False, False)
if info != 0:
raise RuntimeError("Error in Cholesky Decomposition")
inv_M, info = scipy.linalg.lapack.dpotri(zz)
if info != 0:
raise RuntimeError("Error in Cholesky Inverse")
inv = np.triu(inv_M) + np.triu(inv_M, k=1).T
return inv
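# Quick check (illustrative): agrees with np.linalg.inv on a random SPD matrix.
#
# B = np.random.randn(4, 4)
# S = B.dot(B.T) + 4*np.eye(4)
# print(np.allclose(pos_def_mat_inv(S), np.linalg.inv(S)))  # True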
def pos_def_log_det(mat):
""" Return log_det(mat)
Uses LAPACK.DPOTRF to compute the cholesky decomposition
"""
if np.isscalar(mat):
return np.log(mat)
zz, info = scipy.linalg.lapack.dpotrf(mat, False, False)
if info != 0:
raise RuntimeError("Error in Cholesky Decomposition")
logdet_mat = np.sum(np.log(np.diag(zz)))*2.0
return logdet_mat
def lower_tri_mat_inv(lower_tri_mat):
""" Return inverse(lower_tri_mat)
Uses LAPACK.DTRTRI
"""
if np.isscalar(lower_tri_mat):
return lower_tri_mat ** -1
lower_tri_inv, info = scipy.linalg.lapack.dtrtri(lower_tri_mat, True, False)
if info != 0:
raise RuntimeError("Error in Lower Triangular Inverse")
return lower_tri_inv
# Convert Lower Triangular Mat Vector to Matrix
def tril_vector_to_mat(vec):
n = int(np.sqrt(len(vec)*2))
mat = np.zeros((n,n), dtype=float)
mat[np.tril_indices(n)] = vec
return mat
# Symmetrize Matrices
def sym(mat):
if np.isscalar(mat):
return mat
else:
return (mat + np.swapaxes(mat, -1, -2))/2.0
# Stability of VAR(p)
def varp_stability_projection(A, eigenvalue_cutoff=0.9999,
var_name='A', logger=logger):
""" Threshold VAR(p) A to have stable eigenvalues """
m, mp = np.shape(A)
p = mp//m
A_stable = A
if m > 1 or p > 1:
F = np.concatenate([A, np.eye(N=m*(p-1), M = m*p)])
lambduhs = np.linalg.eig(F)[0]
largest_eigenvalue = np.max(np.abs(lambduhs))
if largest_eigenvalue > eigenvalue_cutoff:
logger.info("Thresholding Largest Eigenval F({2}): {0} > {1}".format(
largest_eigenvalue, eigenvalue_cutoff, var_name))
for ii in range(p):
A_stable[:, m*ii:m*(ii+1)] *= \
(eigenvalue_cutoff/largest_eigenvalue)**(ii+1)
else:
largest_eigenvalue = np.abs(A[0,0])
if largest_eigenvalue > eigenvalue_cutoff:
logger.info("Thresholding |{2}|: {0} > {1}".format(
largest_eigenvalue, eigenvalue_cutoff, var_name))
A_stable *= (eigenvalue_cutoff/largest_eigenvalue)
return A_stable
# Asymptotic precision for VAR(1)
def var_stationary_precision(Qinv, A, num_iters=50):
""" Approximate the stationary precision matrix of a VAR """
precision = Qinv
QinvA = np.dot(Qinv, A)
AtQinvA = np.dot(A.T, QinvA)
for ii in range(num_iters):
precision = Qinv - \
np.dot(QinvA, np.linalg.solve(precision + AtQinvA, QinvA.T))
return precision
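# Consistency sketch (assumes scipy.linalg.solve_discrete_lyapunov): the
# stationary covariance S of x_{t+1} = A x_t + N(0, Q) solves S = A S A^T + Q,
# so the fixed point above should approximate inv(S).
#
# from scipy.linalg import solve_discrete_lyapunov
# A, Q = 0.9*np.eye(2), 0.1*np.eye(2)
# S = solve_discrete_lyapunov(A, Q)
# print(var_stationary_precision(np.linalg.inv(Q), A))
# print(np.linalg.inv(S))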
| 5,978 | 31.318919 | 138 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/plotting_utils.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import logging
logger = logging.getLogger(name=__name__)
def plot_metrics(evaluator, full_trace=True, burnin=None,
x='iteration'):
df = evaluator.get_metrics()
df['variable:metric'] = df['variable']+':'+df['metric']
if burnin is None:
if not full_trace:
df = df.query('iteration >= {0}'.format(evaluator.iteration/2))
else:
df = df.query('iteration >= {0}'.format(burnin))
df = df.sort_values(['variable:metric', 'iteration'])
g = sns.FacetGrid(df, col='variable:metric',
col_wrap=4, sharey=False).map(
plt.plot, x, 'value'
).add_legend().set_titles("{col_name}")
return g
def compare_metrics(evaluators, full_trace=True, burnin=None, errorband=False,
x='iteration'):
# Concat Evaluator Metrics
if isinstance(next(iter(evaluators.keys())), tuple):
df = pd.concat(
[evaluator.get_metrics().assign(method=name, init=init)
for ((name, init), evaluator) in evaluators.items()],
ignore_index=True,
)
else:
df = pd.concat(
[evaluator.get_metrics().assign(method=name, init=0)
for (name, evaluator) in evaluators.items()],
ignore_index=True,
)
df['variable:metric'] = df['variable']+':'+df['metric']
# Subset Data
if burnin is None:
if not full_trace:
min_iteration = min([evaluator.iteration
for evaluator in evaluators.values()])
df = df.query('iteration >= {0}'.format(min_iteration/2))
else:
df = df.query('iteration >= {0}'.format(burnin))
df = df.sort_values(['method', 'variable:metric', 'iteration'])
if errorband:
# TODO set x to mean(x) when groupby iteration if x != iteration
g = sns.FacetGrid(df, col='variable:metric', hue="method",
col_wrap=4, sharey=False).map_dataframe(
sns.lineplot, x=x, y='value',
estimator='mean', ci='sd',
)
else:
g = sns.FacetGrid(df, col='variable:metric', hue="method",
col_wrap=4, sharey=False).map_dataframe(
sns.lineplot, x=x, y='value',
units='init', estimator=None, ci=None,
)
g = g.add_legend().set_titles("{col_name}").set_xlabels(x)
return g
def plot_trace_plot(evaluator, full_trace=True, query_string=None,
single_variables=[], burnin=None, x='iteration'):
samples_df = evaluator.get_samples()
if burnin is None:
if not full_trace:
samples_df = samples_df.query('iteration >= {0}'.format(evaluator.iteration/2))
else:
samples_df = samples_df.query('iteration >= {0}'.format(burnin))
if query_string is not None:
samples_df = samples_df.query(query_string)
variables = samples_df['variable'].sort_values().unique()
xs = samples_df[x].sort_values().unique()
variable_values = {
key: np.array(df.sort_values(x)['value'].tolist())
for key, df in samples_df.groupby('variable')
}
# Construct Plots
num_states = getattr(evaluator.sampler, 'num_states', 1)
fig, axes = plt.subplots(len(variables), num_states,
sharex='col', sharey='row',
figsize=(4, len(variables)*3),
)
if num_states == 1:
axes = np.array([[ax] for ax in axes])
for ii_var, variable in enumerate(variables):
values = variable_values[variable]
for k in range(num_states):
ax = axes[ii_var, k]
ax.set_title('{0}_{1}'.format(variable, k))
if variable in single_variables:
values = np.reshape(values, (values.shape[0], -1))
for dim in range(values.shape[1]):
ax.plot(xs, values[:, dim],
label='{0}[{1}]'.format(variable, dim))
else:
values = np.reshape(values, (values.shape[0], num_states, -1))
for dim in range(values.shape[2]):
ax.plot(xs, values[:, k, dim],
label='{0}[{1}]'.format(variable, dim))
if k == num_states-1:
ax.legend()
return fig, axes
def plot_svm_data_fit(observations, true_latent_vars=None,
sampler=None, tqdm=None, N=10000,
ignore_warning=False):
""" Plot fit of SVM to the Data """
if observations.shape[0] > 1000 and not ignore_warning:
raise ValueError("PF inference for observations > 1000 is slow")
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(observations, 'oC0', label='data')
axes[0].set_ylabel('raw observations')
axes[1].plot(np.log(observations**2)-np.mean(np.log(observations**2)), 'oC0',
label='log(data^2) - mean(log(data^2))')
axes[1].set_ylabel('log(observations^2)')
if true_latent_vars is not None:
axes[1].plot(true_latent_vars, '-C1', label='latent_var')
if sampler is not None:
from sgmcmc_ssm.models.svm import SVMSampler
if not isinstance(sampler, SVMSampler):
raise ValueError("sampler must be an SVMSampler")
smoothed_mean, smoothed_var = sampler.message_helper.pf_latent_var_marginal(
observations, sampler.parameters, N=N, tqdm=tqdm)
axes[1].plot(smoothed_mean[:,0], '-C2', label='PF E[X|Y] +/- SD(X|Y)')
axes[1].plot(smoothed_mean[:,0]+np.sqrt(smoothed_var[:,0,0]),'--C2')
axes[1].plot(smoothed_mean[:,0]-np.sqrt(smoothed_var[:,0,0]),'--C2')
axes[0].legend()
axes[1].legend()
return fig, axes
def plot_garch_data_fit(observations, true_latent_vars=None,
sampler=None, tqdm=None, N=10000,
ignore_warning=False):
""" Plot fit of GARCH to the Data """
if observations.shape[0] > 1000 and not ignore_warning:
raise ValueError("PF inference for observations > 1000 is slow")
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(observations, 'oC0', label='y_t')
axes[0].set_ylabel('observations')
axes[1].plot(observations**2, 'oC0',
label='y_t^2')
axes[1].set_ylabel('observations^2')
if true_latent_vars is not None:
axes[0].plot(true_latent_vars, '-C1', label='x_t')
axes[1].plot(true_latent_vars**2, '-C1', label='x_t^2')
if sampler is not None:
from sgmcmc_ssm.models.garch import GARCHSampler
if not isinstance(sampler, GARCHSampler):
raise ValueError("sampler must be an GARCHSampler")
smoothed_mean, smoothed_var = sampler.message_helper.pf_latent_var_marginal(
observations, sampler.parameters, N=N, tqdm=tqdm)
axes[0].plot(smoothed_mean[:,0], '-C2', label='PF E[X|Y] +/- SD(X|Y)')
axes[0].plot(smoothed_mean[:,0]+np.sqrt(smoothed_var[:,0,0]),'--C2')
axes[0].plot(smoothed_mean[:,0]-np.sqrt(smoothed_var[:,0,0]),'--C2')
axes[1].plot(smoothed_mean[:,0]**2, '-C2', label='PF E[X|Y]**2')
axes[0].legend()
axes[1].legend()
return fig, axes
| 7,187 | 38.065217 | 91 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/base_parameters.py | """
Base Parameters
"""
import numpy as np
from copy import deepcopy
# Param and ParamHelper Class
class BaseParameters(object):
""" Base Class for Parameters """
# List of ParamHelpers
_param_helper_list = []
# Set Attributes (must be copied in each subclass)
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
# Methods
def __init__(self, **kwargs):
self.dim = {}
self.var_dict = {}
for param_helper in self._param_helper_list:
param_helper.set_var(self, **kwargs)
return
def _set_check_dim(self, **kwargs):
for dim_key, dim_value in kwargs.items():
if dim_key in self.dim:
if dim_value != self.dim[dim_key]:
raise ValueError(
"{0} does not match existing dims {1} != {2}".format(
dim_key, dim_value, self.dim[dim_key])
)
else:
self.dim[dim_key] = dim_value
return
def as_dict(self, copy=True):
""" Return Dict """
if copy:
return self.var_dict.copy()
else:
return self.var_dict
def as_vector(self):
""" Return flatten vector representation """
return self.from_dict_to_vector(self.var_dict, **self.dim)
def from_vector(self, vector):
""" Set Vars from vector """
var_dict = self.from_vector_to_dict(vector, **self.dim)
self.set_var_dict(**var_dict)
return
@classmethod
def from_dict_to_vector(cls, var_dict, **dim):
""" Convert dict of variable-values to flattened vector """
vector_list = []
for param_helper in cls._param_helper_list:
param_helper.from_dict_to_vector(vector_list, var_dict, **dim)
return np.concatenate([vec for vec in vector_list])
@classmethod
def from_vector_to_dict(cls, vector, **dim):
""" Convert flattened vector to dict of variable-values """
var_dict = {}
vector_index = 0
for param_helper in cls._param_helper_list:
vector_index = param_helper.from_vector_to_dict(
var_dict, vector, vector_index, **dim)
return var_dict
def __iadd__(self, other):
if isinstance(other, dict):
for key in self.var_dict:
self.var_dict[key] += other[key]
else:
raise TypeError("Addition only defined for dict not {0}".format(
type(other)))
return self
def __add__(self, other):
out = self.copy()
out += other
return out
def __radd__(self, other):
return self + other
def copy(self):
new_obj = type(self)(**deepcopy(self.var_dict))
return new_obj
def project_parameters(self, **kwargs):
""" Project Parameters using passed options """
for param_helper in self._param_helper_list:
param_helper.project_parameters(self, **kwargs)
return self
class ParamHelper(object):
""" Base Class for ParamHelper """
def __init__(self, name='theta', dim_names=None):
self.name = name
self.dim_names = [] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
raise NotImplementedError()
def project_parameters(self, param, **kwargs):
raise NotImplementedError()
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
raise NotImplementedError()
return vector_index
def get_properties(self):
# Returns a dict with key-property
raise NotImplementedError()
return {}
# Prior and PriorHelper Class
class BasePrior(object):
""" Base Class for Priors """
# Set Parameters class (should be overriden in subclass)
_Parameters = BaseParameters
# List of PriorHelpers
_prior_helper_list = []
# Methods
def __init__(self, **kwargs):
self.dim = {}
self.hyperparams = {}
for prior_helper in self._prior_helper_list:
prior_helper.set_hyperparams(self, **kwargs)
return
def sample_prior(self, **kwargs):
""" sample parameters from prior """
var_dict = {}
for prior_helper in self._prior_helper_list:
prior_helper.sample_prior(self, var_dict, **kwargs)
parameters = self._Parameters(**var_dict)
return parameters
def sample_posterior(self, sufficient_stat, **kwargs):
""" sample parameters from posterior (for conjugate models) """
var_dict = {}
for prior_helper in self._prior_helper_list:
prior_helper.sample_posterior(
self, var_dict, sufficient_stat, **kwargs)
parameters = self._Parameters(**var_dict)
return parameters
def logprior(self, parameters, **kwargs):
""" Return the log prior density for parameters
Args:
parameters (Parameters)
Returns:
logprior (double)
"""
logprior = 0.0
for prior_helper in self._prior_helper_list:
logprior = prior_helper.logprior(
self, logprior, parameters, **kwargs)
        return float(logprior)
def grad_logprior(self, parameters, **kwargs):
""" Return the gradient of log prior density for parameters
Args:
parameters (Parameters)
Returns:
grad (dict)
"""
grad = {}
for prior_helper in self._prior_helper_list:
prior_helper.grad_logprior(self, grad, parameters, **kwargs)
return grad
@classmethod
def generate_prior(cls, parameters, from_mean=False, var=1.0):
""" Generate Prior to have parameters as its mean """
prior_kwargs = {}
for prior_helper in cls._prior_helper_list:
prior_helper.get_prior_kwargs(prior_kwargs, parameters,
from_mean=from_mean, var=var)
return cls(**prior_kwargs)
@classmethod
def generate_default_prior(cls, var=100.0, **kwargs):
""" Generate Default Prior """
default_kwargs = {}
for prior_helper in cls._prior_helper_list:
prior_helper.get_default_kwargs(default_kwargs, var=var, **kwargs)
return cls(**default_kwargs)
def _set_check_dim(self, **kwargs):
for dim_key, dim_value in kwargs.items():
if dim_key in self.dim:
if dim_value != self.dim[dim_key]:
raise ValueError(
"{0} does not match existing dims {1} != {2}".format(
dim_key, dim_value, self.dim[dim_key])
)
else:
self.dim[dim_key] = dim_value
return
class PriorHelper(object):
""" Base Class for PriorHelper """
def __init__(self, name='theta', dim_names=None):
self.name = name
self.dim_names = [] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
raise NotImplementedError()
def sample_prior(self, prior, var_dict, **kwargs):
raise NotImplementedError()
def sample_posterior(self, prior, var_dict, set_value_func, **kwargs):
raise NotImplementedError()
def logprior(self, prior, logprior, parameters, **kwargs):
raise NotImplementedError()
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
raise NotImplementedError()
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
raise NotImplementedError()
def get_default_kwargs(self, default_kwargs, **kwargs):
raise NotImplementedError()
# Preconditioner and PrecondHelper Class
class BasePreconditioner(object):
""" Base Class for Preconditioner """
# List of PrecondHelpers
_precond_helper_list = []
# Methods
def __init__(self, **kwargs):
for precond_helper in self._precond_helper_list:
precond_helper.set_preconditioner_vars(self, **kwargs)
return
def precondition(self, grad, parameters, scale=1.0, **kwargs):
""" Return dict with precondition gradients """
precond_grad = {}
for precond_helper in self._precond_helper_list:
precond_helper.precondition(self, precond_grad, grad, parameters,
**kwargs)
for var in precond_grad:
precond_grad[var] *= scale
return precond_grad
def precondition_noise(self, parameters, scale=1.0):
""" Return dict with precondition noise """
noise = {}
for precond_helper in self._precond_helper_list:
precond_helper.precondition_noise(self, noise, parameters)
for var in noise:
noise[var] *= scale**0.5
return noise
def correction_term(self, parameters, scale=1.0):
""" Return dict with correction term """
correction = {}
for precond_helper in self._precond_helper_list:
precond_helper.correction_term(self, correction, parameters)
for var in correction:
correction[var] = correction[var]*scale
return correction
class PrecondHelper(object):
""" Base Class for PrecondHelper """
def __init__(self, name='theta', dim_names=None):
self.name = name
self.dim_names = [] if dim_names is None else dim_names
return
def set_preconditioner_vars(self, preconditioner, **kwargs):
return
def precondition(self, preconditioner, precond_grad, grad, parameters,
**kwargs):
return
def precondition_noise(self, preconditioner, noise, parameters, **kwargs):
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
return
# Property Helper Functions
def get_value_func(name):
def fget(self):
return self.var_dict[name]
return fget
def set_value_func(name):
def fset(self, value):
self.var_dict[name] = value
return
return fset
def get_dim_func(name):
def fget(self):
return self.dim[name]
return fget
def get_hyperparam_func(name):
def fget(self):
return self.hyperparams[name]
return fget
def set_hyperparam_func(name):
def fset(self, value):
self.hyperparams[name] = value
return
return fset
# Temporary Demo
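# (a minimal sketch of how the property helpers above wire up a Parameters
#  subclass; `ToyParameters` and `theta` are illustrative names, not part of
#  the library)
#
# class ToyParameters(BaseParameters):
#     _param_helper_list = []
#     theta = property(fget=get_value_func('theta'),
#                      fset=set_value_func('theta'))
#
# p = ToyParameters()
# p.theta = np.zeros(2)   # stored in p.var_dict['theta']
# print(p.theta)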
| 10,720 | 29.807471 | 80 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/evaluator.py | import numpy as np
import pandas as pd
import logging
import time
logger = logging.getLogger(name=__name__)
class BaseEvaluator(object):
""" Evaluator Base Class """
def __init__(self, sampler, metric_functions=None, sample_functions=None):
# Set Sampler
self.sampler = sampler
# Check metric functions
metric_functions = self._process_metric_functions(metric_functions)
self.metric_functions = metric_functions
# Check sample functions
sample_functions = self._process_sample_functions(sample_functions)
self.sample_functions = sample_functions
# Base Init
self.metrics = pd.DataFrame()
self.samples = pd.DataFrame()
return
@staticmethod
def _process_metric_functions(metric_functions):
if callable(metric_functions):
metric_functions = [metric_functions]
elif isinstance(metric_functions, list):
for metric_function in metric_functions:
if not callable(metric_function):
raise ValueError("metric_functions must be list of funcs")
elif metric_functions is None:
metric_functions = []
else:
ValueError("metric_functions should be list of funcs")
return metric_functions
@staticmethod
def _process_sample_functions(sample_functions):
if callable(sample_functions):
sample_functions = [sample_functions]
elif isinstance(sample_functions, list):
for sample_function in sample_functions:
if not callable(sample_function):
raise ValueError("sample_functions must be list of funcs")
elif sample_functions is None:
sample_functions = []
else:
ValueError("sample_functions should be list of funcs")
return sample_functions
def eval_sample_functions(self, sample_functions = None, iteration = None):
""" Extract samples from current state of sampler
Args:
sample_functions (list of funcs): sample functions
Defaults to sample functions defined in __init__
"""
if sample_functions is None:
sample_functions = self.sample_functions
else:
sample_functions = self._process_sample_functions(sample_functions)
if iteration is None:
iteration = self.iteration
if len(sample_functions) == 0:
# Skip if no sample_functions
return
iter_samples = []
for sample_function in sample_functions:
sample = sample_function(self.sampler)
if isinstance(sample, dict):
logger.debug("Sample: %s", str(sample))
iter_samples.append(sample)
elif isinstance(sample, list):
for sam in sample:
if not isinstance(sam, dict):
raise TypeError("Sample function output must be " + \
"dict or list of dict")
logger.debug("Sample: %s", str(sam))
iter_samples.append(sam)
else:
raise TypeError("sample_functions output must be " + \
"dict or list of dict")
iter_sample = pd.DataFrame(iter_samples)
iter_sample["iteration"] = iteration
        self.samples = pd.concat([self.samples, iter_sample],
                                 ignore_index=True, sort=True)
return
def eval_metric_functions(self, metric_functions = None, iteration = None):
""" Evaluate the state of the sampler
Args:
metric_functions (list of funcs): evaluation functions
Defaults to metric functions defined in __init__
"""
if metric_functions is None:
metric_functions = self.metric_functions
else:
metric_functions = self._process_metric_functions(metric_functions)
if iteration is None:
iteration = self.iteration
if len(metric_functions) == 0:
# Skip if no metric_functions
return
iter_metrics = []
for metric_function in metric_functions:
metric = metric_function(self.sampler)
if isinstance(metric, dict):
logger.info("Metric: %s", str(metric))
iter_metrics.append(metric)
elif isinstance(metric, list):
for met in metric:
if not isinstance(met, dict):
raise TypeError("Metric must be dict or list of dict")
logger.info("Metric: %s", str(met))
iter_metrics.append(met)
else:
raise TypeError("Metric must be dict or list of dict")
iter_metrics = pd.DataFrame(iter_metrics)
iter_metrics["iteration"] = iteration
        self.metrics = pd.concat([self.metrics, iter_metrics],
                                 ignore_index=True, sort=True)
return
def get_metrics(self, extra_columns={}):
""" Return a pd.DataFrame copy of metrics
Args:
extra_columns (dict): extra metadata to add as columns
Returns:
pd.DataFrame with columns
metric, variable, value, iteration, extra_columns
"""
metrics = self.metrics.copy()
for k,v in extra_columns.items():
metrics[k] = v
return metrics
def save_metrics(self, filename, extra_columns = {}):
""" Save a pd.DataFrame to filename + '.csv' """
metrics = self.get_metrics(extra_columns)
logger.info("Saving metrics to file %s", filename)
metrics.to_csv(filename + ".csv", index = False)
return
def get_samples(self, extra_columns = {}):
""" Return a pd.DataFrame of samples """
if self.sample_functions is None:
logger.warning("No sample functions were provided to track!!!")
samples = self.samples.copy()
for k,v in extra_columns.items():
samples[k] = v
return samples
def save_samples(self, filename):
""" Save a pd.DataFrame to filename + '.csv' """
samples = self.get_samples()
logger.info("Saving samples to file %s", filename)
samples.to_csv(filename + ".csv", index = False)
return
# Old Evaluator
class SamplerEvaluator(BaseEvaluator):
""" Wrapper to handle measuring a Sampler's Performance Online
Args:
sampler (Sampler): the sampler
metric_functions (func or list of funcs): evaluation functions
Each function takes a sampler and returns a dict (or list of dict)
{metric, variable, value} for each
(See metric_functions)
sample_functions (func or list of funcs, optional): samples to save
Each function takes a sampler and returns a dict (or list of dict)
"variable", "value"
init_state (dict, optional): state of evaluater
must contain metrics_df, samples_df, and iteration
Attributes:
metrics (pd.DataFrame): output data frame with columns
* metric (string)
* variable (string)
* value (double)
* iteration (int)
samples (pd.DataFrame): sampled params with columns
* variable (string)
* value (object)
* iteration (int)
"""
def __init__(self, sampler,
metric_functions = None, sample_functions = None,
init_state=None, **kwargs):
self.sampler = sampler
# Check metric functions
metric_functions = self._process_metric_functions(metric_functions)
self.metric_functions = metric_functions
# Check sample functions
sample_functions = self._process_sample_functions(sample_functions)
self.sample_functions = sample_functions
if init_state is None:
self.iteration = 0
self.elapsed_time = 0.0
self._init_metrics()
self._init_samples()
else:
self.load_state(**init_state)
return
def load_state(self, metrics_df, samples_df, iteration, elapsed_time):
""" Overwrite metrics, samples, and iteration """
if metrics_df.shape[0] > 0:
self.metrics = metrics_df[
['iteration', 'metric', 'variable', 'value']]
else:
self.metrics = metrics_df
if samples_df.shape[0] > 0:
self.samples = samples_df[['iteration', 'variable', 'value']]
else:
self.samples = samples_df
self.iteration = iteration
self.elapsed_time = elapsed_time
return
def get_state(self):
""" Return dict with metrics_df, samples_df, iteration """
state = dict(
metrics_df = self.metrics,
samples_df = self.samples,
iteration = self.iteration,
elapsed_time = self.elapsed_time,
)
return state
def _init_metrics(self):
self.metrics = pd.DataFrame()
self.eval_metric_functions()
init_metric = {
"variable": "runtime",
"metric": "runtime",
"value": 0.0,
"iteration": self.iteration,
}
        self.metrics = pd.concat([self.metrics, pd.DataFrame([init_metric])],
                                 ignore_index=True, sort=True)
return
def reset_metrics(self):
""" Reset self.metrics """
logger.info("Resetting metrics")
self.iteration = 0
self.elapsed_time = 0.0
self._init_metrics()
return
def _init_samples(self):
self.samples = pd.DataFrame()
self.eval_sample_functions()
return
def reset_samples(self):
""" Reset self.samples """
logger.info("Resetting samples")
self._init_samples()
return
def evaluate_sampler_step(self, iter_func_name,
iter_func_kwargs = None, evaluate = True):
""" Evaluate the performance of the sampler steps
Args:
iter_func_name (string or list of strings):
name(s) of sampler member functions
(e.g. `'sample_sgld'` or `['sample_sgld']*10`)
iter_func_kwargs (kwargs or list of kwargs):
options to pass to iter_func_name
evaluate (bool): whether to perform evaluation (default = True)
Returns:
            out (output of iter_func_name)
"""
logger.info("Sampler %s, Iteration %d",
self.sampler.name, self.iteration+1)
# Single Function
if isinstance(iter_func_name, str):
iter_func = getattr(self.sampler, iter_func_name, None)
if iter_func is None:
raise ValueError(
"iter_func_name `{}` is not in sampler".format(
iter_func_name)
)
if iter_func_kwargs is None:
iter_func_kwargs = {}
sampler_start_time = time.time()
out = iter_func(**iter_func_kwargs)
sampler_step_time = time.time() - sampler_start_time
# Multiple Steps
elif isinstance(iter_func_name, list):
iter_funcs = [getattr(self.sampler, func_name, None)
for func_name in iter_func_name]
if None in iter_funcs:
raise ValueError("Invalid iter_func_name")
if iter_func_kwargs is None:
iter_func_kwargs = [{} for _ in iter_funcs]
if not isinstance(iter_func_kwargs, list):
raise TypeError("iter_func_kwargs must be a list of dicts")
if len(iter_func_kwargs) != len(iter_func_name):
raise ValueError("iter_func_kwargs must be same length " +
"as iter_func_name")
sampler_start_time = time.time()
out = []
for iter_func, kwargs in zip(iter_funcs, iter_func_kwargs):
out.append(iter_func(**kwargs))
sampler_step_time = time.time() - sampler_start_time
else:
raise TypeError("Invalid iter_func_name")
self.iteration += 1
self.elapsed_time += sampler_step_time
time_metric = [{
"variable": "time",
"metric": "time",
"value": sampler_step_time,
"iteration": self.iteration,
},
{
"variable": "runtime",
"metric": "runtime",
"value": self.elapsed_time,
"iteration": self.iteration,
}]
if evaluate:
# Save Metrics
            self.metrics = pd.concat(
                [self.metrics, pd.DataFrame(time_metric)],
                ignore_index=True, sort=True)
self.eval_metric_functions()
# Save Samples
if self.sample_functions is not None:
self.eval_sample_functions()
return out
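# Illustrative sketch (not part of the original API): a minimal online
# evaluation loop built on SamplerEvaluator. `sampler`, `metric_fns`, and
# `sample_fns` are hypothetical stand-ins (e.g. built with the helpers in
# metric_functions.py); the SGLD step size and buffer settings are placeholders.
def _example_online_evaluation(sampler, metric_fns, sample_fns, num_iters=10):
    evaluator = SamplerEvaluator(sampler,
        metric_functions=metric_fns, sample_functions=sample_fns)
    for _ in range(num_iters):
        # One SGLD step per iteration; metrics and samples are recorded
        # after each step
        evaluator.evaluate_sampler_step('sample_sgld',
            iter_func_kwargs=dict(epsilon=0.01,
                subsequence_length=16, buffer_length=8))
    return evaluator.get_metrics(), evaluator.get_samples()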
# Offline Evaluator
class OfflineEvaluator(BaseEvaluator):
""" Wrapper to handle measuring a Sampler's Performance Offline
Args:
sampler (Sampler): the sampler
parameters_list (list or DataFrame): list of parameters to evaluate offline
parameters_times (list or ndarray, optional): fit time for parameters
Should be same size as parameters_list
Time is in seconds
metric_functions (func or list of funcs, optional): evaluation functions
Each function takes a sampler and returns a dict (or list of dict)
{metric, variable, value} for each
(See metric_functions)
        sample_functions (func or list of funcs, optional): samples to save
            Each function takes a sampler and returns a dict (or list of dict)
            with keys "variable", "value"
        init_state (dict, optional): state of evaluator
            must contain metrics_df, samples_df, and eval_flag
Attributes:
metrics (pd.DataFrame): output data frame with columns
* metric (string)
* variable (string)
* value (double)
* iteration (int)
samples (pd.DataFrame): sampled params with columns
* variable (string)
* value (object)
* iteration (int)
"""
def __init__(self, sampler, parameters_list, parameters_times=None,
metric_functions = None, sample_functions = None,
init_state=None):
# Set Sampler
self.sampler = sampler
# Set parameter list
self.parameters_list = self._process_parameters_list(parameters_list)
self.iteration = np.max(self.parameters_list['iteration'])
self.parameters_times = self._process_parameters_times(parameters_times)
if parameters_times is not None:
if self.parameters_times.shape[0] != self.parameters_list.shape[0]:
raise ValueError(
"parameters_times and parameters_list are not the same length"
)
# Check metric functions
metric_functions = self._process_metric_functions(metric_functions)
self.metric_functions = metric_functions
# Check sample functions
sample_functions = self._process_sample_functions(sample_functions)
self.sample_functions = sample_functions
if init_state is None:
self.eval_flag = pd.DataFrame(dict(
iteration = self.parameters_list['iteration'],
eval_flag = [False
for _ in range(self.parameters_list.shape[0])],
))
self.metrics = pd.DataFrame()
self.samples = pd.DataFrame()
else:
self.load_state(**init_state)
return
@staticmethod
def _process_parameters_list(parameters_list):
if isinstance(parameters_list, pd.DataFrame):
if 'iteration' not in parameters_list.columns:
raise ValueError("`iteration` not found in parameters_list.columns")
if 'parameters' not in parameters_list.columns:
raise ValueError("`parameters` not found in parameters_list.columns")
return parameters_list.sort_values(by='iteration')
elif isinstance(parameters_list, list):
iteration = np.arange(len(parameters_list))
parameters_list = pd.DataFrame(dict(
iteration = iteration,
parameters = parameters_list,
))
return parameters_list
else:
raise ValueError("parameters_list is not a list or pd.DataFrame")
@staticmethod
def _process_parameters_times(parameters_times):
if parameters_times is None:
return None
elif isinstance(parameters_times, pd.DataFrame):
if 'iteration' not in parameters_times.columns:
raise ValueError("`iteration` not found in parameters_times")
if 'time' not in parameters_times.columns:
raise ValueError("`time` not found in parameters_times")
return parameters_times.sort_values(by='iteration')
elif isinstance(parameters_times, list) or \
isinstance(parameters_times, np.ndarray):
iteration = np.arange(len(parameters_times))
parameters_times = pd.DataFrame(dict(
iteration = iteration,
time = parameters_times,
))
return parameters_times
else:
raise ValueError("parameters_times is not a list or pd.DataFrame")
def load_state(self, metrics_df, samples_df, eval_flag):
""" Overwrite metrics, samples, and iteration """
if metrics_df.shape[0] > 0:
self.metrics = metrics_df[
['iteration', 'metric', 'variable', 'value']]
else:
self.metrics = metrics_df
if samples_df.shape[0] > 0:
self.samples = samples_df[['iteration', 'variable', 'value']]
else:
self.samples = samples_df
if eval_flag.shape[0] == self.parameters_list.shape[0]:
self.eval_flag = eval_flag
elif eval_flag.shape[0] < self.parameters_list.shape[0]:
self.eval_flag = pd.DataFrame(dict(
iteration = self.parameters_list['iteration'],
eval_flag = [False
for _ in range(self.parameters_list.shape[0])],
))
for row_index, row in eval_flag.iterrows():
if row['iteration'] == True:
self.eval_flag.iloc[row_index, 1] = row['eval_flag']
else:
self.eval_flag.loc[
self.eval_flag['iteration'] == row['iteration'],
'eval_flag'] = row['eval_flag']
else:
raise ValueError("eval_flag + parameters_list do not match lengths")
return
def get_state(self):
""" Return dict with metrics_df, samples_df, eval_flag """
state = dict(
metrics_df = self.metrics,
samples_df = self.samples,
eval_flag = self.eval_flag,
)
return state
def num_to_eval(self):
return self.eval_flag.shape[0] - self.eval_flag.eval_flag.sum()
def evaluate(self, num_to_eval=None, max_time=None, eval_order="recursive",
iter_func_name=None, iter_func_kwargs=None,
tqdm=None):
""" Evaluate the parameters in parameters_list
Evaluate both metric_funcs + iter_funcs
Args:
num_to_eval (int): number of parameters to evaluate
(default is to evaluate all)
max_time (double): max time to evaluate in seconds
(default is inf)
eval_order (string): order to evaluate parameters
(that haven't been evaluated)
"recursive": evaluate first, last, and then recursively bisect
"sequential": evaluate first, second, third
            iter_func_name (string or list of strings):
                functions to call before evaluation (after setting parameters)
            iter_func_kwargs (kwargs or list of kwargs):
                options to pass to iter_func_name
            tqdm (tqdm, optional): progress bar wrapper
        """
if num_to_eval is None:
num_to_eval = self.num_to_eval()
if max_time is None:
max_time = np.inf
pbar = range(min(num_to_eval, self.num_to_eval()))
if tqdm is not None:
pbar = tqdm(pbar, desc="offline eval")
eval_start_time = time.time()
for _ in pbar:
if self.eval_flag.eval_flag.sum() == self.eval_flag.shape[0]:
                logger.warning("No more parameters to evaluate on")
return
iteration = self._get_eval_iteration(eval_order)
self.sampler.parameters = self.parameters_list[
self.parameters_list['iteration'] == iteration
]['parameters'].iloc[0].copy()
if tqdm is not None:
pbar.set_description("offline eval on iteration {}".format(
iteration))
logger.info("Sampler %s, Iteration %d",
self.sampler.name, iteration)
# Call Sampler Func
if iter_func_name is None:
pass
elif isinstance(iter_func_name, str):
# Single Functions
iter_func = getattr(self.sampler, iter_func_name, None)
if iter_func is None:
raise ValueError(
"iter_func_name `{}` is not in sampler".format(
iter_func_name)
)
if iter_func_kwargs is None:
iter_func_kwargs = {}
iter_func(**iter_func_kwargs)
elif isinstance(iter_func_name, list):
# Multiple Functions
iter_funcs = [getattr(self.sampler, func_name, None)
for func_name in iter_func_name]
if None in iter_funcs:
raise ValueError("Invalid iter_func_name")
if iter_func_kwargs is None:
iter_func_kwargs = [{} for _ in iter_funcs]
if not isinstance(iter_func_kwargs, list):
raise TypeError("iter_func_kwargs must be a list of dicts")
if len(iter_func_kwargs) != len(iter_func_name):
raise ValueError("iter_func_kwargs must be same length " +
"as iter_func_name")
for iter_func, kwargs in zip(iter_funcs, iter_func_kwargs):
iter_func(**kwargs)
else:
raise TypeError("Invalid iter_func_name")
# Evaluate metrics + samples
self.eval_metric_functions(iteration=iteration)
self.eval_sample_functions(iteration=iteration)
# Mark Iteration as sampled
self.eval_flag.loc[
self.eval_flag.iteration == iteration, 'eval_flag'] = True
# Early Termination on Max Time
if time.time() - eval_start_time > max_time:
break
return
def _get_eval_iteration(self, eval_order="recursive"):
# Get next iteration in parameters_list to evaluate
if not self.eval_flag.eval_flag.iloc[0]:
# Always evaluate first iteration
return self.eval_flag.iteration.iloc[0]
if eval_order == "sequential":
index = np.argmax(self.eval_flag.eval_flag.ne(True).values)
iteration = self.eval_flag.iteration.iloc[index]
elif eval_order == "recursive":
num_indices = self.eval_flag.shape[0]
evaled_indices = np.arange(num_indices, dtype=int)[
self.eval_flag.eval_flag]
eval_gaps = np.pad(evaled_indices, ((0, 1)),
mode='constant',
constant_values=num_indices-1,
)[1:] - evaled_indices
eval_gaps[-1] *= 2 # Double last diff
max_gap_index = np.argmax(eval_gaps)
index = (
evaled_indices[max_gap_index] -
(-eval_gaps[max_gap_index]//2) #Ceil
)
iteration = self.eval_flag.iteration.iloc[index]
else:
raise ValueError("Unrecoginized eval_order {0}".format(eval_order))
return iteration
@staticmethod
def _add_time(df, times):
""" Append times to df by matching on iteration """
df_times = pd.merge(df, times, on='iteration')
return df_times
def get_metrics(self, extra_columns={}):
metrics = super().get_metrics(extra_columns=extra_columns)
if self.parameters_times is not None:
metrics = self._add_time(metrics, self.parameters_times)
return metrics
def get_samples(self, extra_columns={}):
samples = super().get_samples(extra_columns=extra_columns)
if self.parameters_times is not None:
samples = self._add_time(samples, self.parameters_times)
return samples
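# Illustrative sketch (not part of the original API): offline evaluation of a
# saved parameter trace, e.g. the output of SGMCMCSampler.fit_timed.
# `sampler` and `metric_fns` are hypothetical stand-ins.
def _example_offline_evaluation(sampler, parameters_list, times, metric_fns):
    evaluator = OfflineEvaluator(sampler,
        parameters_list=parameters_list,
        parameters_times=times,
        metric_functions=metric_fns)
    # Evaluate at most 50 saved iterations; 'recursive' bisects the trace so
    # early and late iterations are covered before the gaps are filled in
    evaluator.evaluate(num_to_eval=50, eval_order='recursive')
    # Metrics come back with a 'time' column merged from parameters_times
    return evaluator.get_metrics()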
# Helper functions
def average_parameters_list(parameters_list, burnin=None):
""" Return a running average of parameters after burnin
        theta_bar_t = (sum_{burnin <= s <= t} theta_s)/(t-burnin+1) for t >= burnin
theta_bar_t = theta_t for t < burnin
Args:
parameters_list (list or pd.Series): list of parameters
        burnin (int, optional): number of burnin steps
            (default is 0.33 of len(parameters_list))
Returns:
averaged_parameters_list (list): list of averaged parameters
"""
if isinstance(parameters_list, pd.Series):
parameters_list = parameters_list.tolist()
if len(parameters_list) == 0:
return
Parameters = type(parameters_list[0])
parameters_dim = parameters_list[0].dim
if burnin is None:
burnin = int(len(parameters_list)*0.33)
parameters_vectors = [None] * len(parameters_list)
for ii, parameters in enumerate(parameters_list):
parameters_vectors[ii] = parameters.as_vector()
parameters_vectors = np.array(parameters_vectors)
parameters_vectors[burnin:] = (
np.cumsum(parameters_vectors[burnin:], axis=0) /
np.arange(1,parameters_vectors.shape[0]-burnin+1)[:,None]
)
averaged_parameters_list = [None] * len(parameters_list)
for ii, vector in enumerate(parameters_vectors):
averaged_parameters_list[ii] = Parameters(
**Parameters.from_vector_to_dict(vector, **parameters_dim)
)
return averaged_parameters_list
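# Minimal numeric sketch (illustration only) of the running-average recursion
# above, using plain numpy arrays in place of Parameters objects:
def _example_running_average(vectors, burnin=0):
    vectors = np.array(vectors, dtype=float)
    averaged = vectors.copy()
    averaged[burnin:] = (
        np.cumsum(vectors[burnin:], axis=0) /
        np.arange(1, vectors.shape[0] - burnin + 1)[:, None]
    )
    # e.g. _example_running_average([[0.], [2.], [4.]], burnin=1)
    # -> [[0.], [2.], [3.]]
    return averaged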
def half_average_parameters_list(parameters_list):
""" Return a running average of last half parameters
theta_bar_t = mean_{t/2 <= s <= t} theta_s
Args:
parameters_list (list or pd.Series): list of parameters
Returns:
half_averaged_parameters_list (list): list of averaged parameters
"""
if isinstance(parameters_list, pd.Series):
parameters_list = parameters_list.tolist()
if len(parameters_list) == 0:
return
Parameters = type(parameters_list[-1])
parameters_dim = parameters_list[-1].dim
parameters_vectors = [None] * len(parameters_list)
for ii, parameters in enumerate(parameters_list):
parameters_vectors[ii] = parameters.as_vector()
parameters_vectors = np.array(parameters_vectors)
parameters_vectors = np.array([
np.mean(parameters_vectors[ii//2:ii+1,:], axis=0)
for ii in range(parameters_vectors.shape[0])
])
half_averaged_parameters_list = [None] * len(parameters_list)
for ii, vector in enumerate(parameters_vectors):
half_averaged_parameters_list[ii] = Parameters(
**Parameters.from_vector_to_dict(vector, **parameters_dim)
)
return half_averaged_parameters_list
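# Companion sketch (illustration only) for the last-half average: for each t,
# average the vectors from floor(t/2) to t, mirroring the slicing above.
def _example_half_average(vectors):
    vectors = np.array(vectors, dtype=float)
    return np.array([
        np.mean(vectors[t // 2:t + 1], axis=0)
        for t in range(vectors.shape[0])
    ])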
| 28,263 | 36.735648 | 85 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/metric_functions.py | """
Sampler Metric Function Generators
"""
import numpy as np
def sample_function_parameter(parameter_name, return_variable_name=None):
""" Returns sample function that extracts a parameter from current state
Args:
        parameter_name (string): attribute in sampler.parameters
            (e.g. A, C, LRinv, R)
        return_variable_name (string, optional): name of the returned variable
            default is `parameter_name`
Returns:
A function of sampler that returns dictionary of variable, value
"""
if return_variable_name is None:
return_variable_name = parameter_name
def custom_sample_function(sampler):
cur_parameter = np.copy(getattr(sampler.parameters, parameter_name))
sample = {'variable': return_variable_name,
'value': cur_parameter,
}
return sample
return custom_sample_function
def sample_function_parameters(parameter_names, return_variable_names=None,
decorator=None):
""" Returns sample function that extracts a parameter from current state
Args:
parameter_names (list of string): atrributes in sampler.parameters
(e.g. A, C, LRinv, R)
return_variable_name (list of string, optional): names of return name
default is `parameter_name`
decorator (func, optional): function decorator
Returns:
A function of sampler that returns list of dictionary of variable, value
"""
if return_variable_names is None:
return_variable_names = [None]*len(parameter_names)
elif len(return_variable_names) != len(parameter_names):
raise ValueError("parameter and return names must be equal length")
sample_functions = [
sample_function_parameter(parameter_name, return_variable_name)
for parameter_name, return_variable_name in zip(
parameter_names, return_variable_names)
]
def custom_sample_function(sampler):
samples = [sample_function(sampler)
for sample_function in sample_functions]
return samples
if decorator is not None:
custom_sample_function = decorator(custom_sample_function)
return custom_sample_function
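# Hypothetical usage sketch: build a sample function that records the 'A' and
# 'Q' parameters at each evaluated iteration ('A'/'Q' assume an LGSSM-style
# Parameters object; substitute the attributes of your model).
def _example_sample_functions(sampler):
    sample_fn = sample_function_parameters(['A', 'Q'])
    # -> [{'variable': 'A', 'value': ...}, {'variable': 'Q', 'value': ...}]
    return sample_fn(sampler)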
def metric_function_parameter(parameter_name, target_value, metric_name,
return_variable_name=None):
""" Returns metric function that compares samplers' state to a target
Args:
        parameter_name (string): attribute in sampler.parameters
(e.g. A, C, LRinv, R)
target_value (np.ndarray): target value
metric_name (string): name of a metric function
* 'logmse': log10 mean squared error
* 'mse': mean squared error
* 'mae': mean absolute error
(see construct_metric_function)
        return_variable_name (string, optional): name of the returned variable
            default is `parameter_name`
Returns:
A function of sampler that returns dictionary of variable, metric, value
"""
metric_func = construct_metric_function(metric_name)
if return_variable_name is None:
return_variable_name = parameter_name
def custom_metric_function(sampler):
cur_parameter = getattr(sampler.parameters, parameter_name)
metric_value = metric_func(cur_parameter, target_value)
metric = {'variable': return_variable_name,
'metric': metric_name,
'value': metric_value
}
return metric
return custom_metric_function
def metric_function_parameters(parameter_names, target_values, metric_names,
return_variable_names=None, decorator=None, criteria=None,
double_permutation_flag=False):
""" Returns sample function that extracts a parameter from current state
Args:
parameter_names (list of string): atrributes in sampler.parameters
(e.g. A, C, LRinv, R)
target_values (list of np.ndarray): target values
metric_names (list of string): names of metric functions
* 'logmse': log10 mean squared error
* 'mse': mean squared error
* 'mae': mean absolute error
(see construct_metric_function)
        return_variable_names (list of string, optional): names of the
            returned variables (default is `parameter_names`)
decorator (func, optional): function decorator
criteria (list of func or tuple, optional): e.g. [min] or None
double_permutation_flag (bool, optional): default False
Only for use with criteria for pi, expanded_pi, logit_pi vars
Returns:
A function of sampler that returns list of dictionaries of
variable, metric, value
"""
if (len(target_values) != len(parameter_names) or \
len(metric_names) != len(parameter_names)):
raise ValueError("input args not equal length")
if return_variable_names is None:
return_variable_names = [None]*len(parameter_names)
elif len(return_variable_names) != len(parameter_names):
raise ValueError("parameter and return names must be equal length")
if criteria is None:
metric_functions = [
metric_function_parameter(parameter_name, target_value,
metric_name, return_variable_name)
for parameter_name, target_value, metric_name, return_variable_name\
in zip(
parameter_names, target_values, metric_names,
return_variable_names)
]
else:
if double_permutation_flag:
metric_functions = [
best_double_permutation_metric_function_parameter(
parameter_name, target_value, metric_name,
return_variable_name, best_function)
for parameter_name, target_value, metric_name, \
return_variable_name, best_function \
in zip(
parameter_names, target_values, metric_names,
return_variable_names, criteria)
]
else:
metric_functions =[
best_permutation_metric_function_parameter(parameter_name,
target_value, metric_name, return_variable_name,
best_function)
for parameter_name, target_value, metric_name, \
return_variable_name, best_function \
in zip(
parameter_names, target_values, metric_names,
return_variable_names, criteria)
]
def custom_metric_function(sampler):
metrics = [metric_function(sampler)
for metric_function in metric_functions]
return metrics
if decorator is not None:
custom_metric_function = decorator(custom_metric_function)
return custom_metric_function
def metric_function_from_sampler(sampler_func_name, metric_name=None,
return_variable_name="sampler",
**sampler_func_kwargs):
""" Returns metric function that evaluates sampler_func_name
Example:
metric_function_of_sampler(sampler_func_name = "exact_loglikelihood")
"""
if metric_name is None:
metric_name = sampler_func_name
def custom_metric_function(sampler):
sampler_func = getattr(sampler, sampler_func_name, None)
if sampler_func is None:
raise ValueError(
"sampler_func_name `{}` is not in sampler".format(
sampler_func_name)
)
else:
metric_value = sampler_func(**sampler_func_kwargs)
metric = {'variable': return_variable_name,
'metric': metric_name,
'value': metric_value}
return metric
return custom_metric_function
def construct_metric_function(metric_name):
""" Return a metric function
Args:
metric_name (string): name of metric. Must be one of
* 'logmse': log10 mean squared error
* 'mse': mean squared error
* 'rmse': root mean squared error (L2 norm)
* 'mae': mean absolute error
Returns:
metric_function (function):
function of two inputs (result, expected)
"""
if(metric_name == "mse"):
def metric_function(result, expected):
return np.mean((result - expected)**2)
elif(metric_name == "logmse"):
def metric_function(result, expected):
return np.log10(np.mean((result - expected)**2))
elif(metric_name == "rmse"):
def metric_function(result, expected):
return np.sqrt(np.mean((result - expected)**2))
elif(metric_name == "mae"):
def metric_function(result, expected):
return np.mean(np.abs(result - expected))
else:
raise ValueError("Unrecognized metric name = %s" % metric_name)
return metric_function
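# Minimal numeric check (illustration only) of the metric functions above:
def _example_construct_metric_function():
    result = np.array([1.0, 2.0])
    expected = np.array([1.0, 2.5])
    mse = construct_metric_function('mse')(result, expected)    # 0.125
    mae = construct_metric_function('mae')(result, expected)    # 0.25
    rmse = construct_metric_function('rmse')(result, expected)  # ~0.3536
    return mse, mae, rmse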
def average_input_decorator(sampler_function):
""" Average over all calls to sampler_function """
def average_function(sampler):
average_function.num_calls += 1
average_function.sum_vector += sampler.parameters.vector
sampler_params = sampler.parameters.vector
sampler.parameters.vector = (average_function.sum_vector / \
average_function.num_calls)
output = sampler_function(sampler)
if isinstance(output, dict):
output['variable'] = "avg_"+output['variable']
else:
for out in output:
out['variable'] = "avg_"+out['variable']
sampler.parameters.vector = sampler_params
return output
average_function.num_calls = 0
average_function.sum_vector = 0.0
return average_function
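# Hypothetical usage sketch: track a metric at the running posterior mean by
# wrapping it with average_input_decorator. `target_A` is a placeholder for
# the true value of an 'A' parameter; the decorator keeps a running mean of
# sampler.parameters.vector across calls, evaluates the wrapped metric at that
# mean, and reports it under the variable name 'avg_A'.
def _example_average_metric(target_A):
    return average_input_decorator(
        metric_function_parameter('A', target_A, 'mse'))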
def best_permutation_metric_function_parameter(parameter_name, target_value,
metric_name, return_variable_name=None, best_function=max):
""" Select `best' metric across all permutations of first index
Args:
        parameter_name (string): attribute in sampler.parameters
(e.g. A, Q) with sampler.num_states # different versions
target_value (np.ndarray): target value
metric_name (string): name of a metric function
* 'mse': mean squared error
* 'mae': mean absolute error
(see construct_metric_function)
        return_variable_name (string, optional): name of the returned variable
            default is `parameter_name`
        best_function (function, optional): takes a list of doubles and
            returns the `best' one; default is max (e.g. max, min)
Returns:
A function of sampler that returns dictionary of variable, metric, value
"""
if best_function is None:
return metric_function_parameter(parameter_name, target_value,
metric_name, return_variable_name)
    if return_variable_name is None:
        return_variable_name = parameter_name
    metric_func = construct_metric_function(metric_name)
    import itertools
def custom_metric_function(sampler):
cur_parameter = getattr(sampler.parameters, parameter_name)
metric_values = [
metric_func(
cur_parameter,
target_value[np.array(permuted_indices)],
)
for permuted_indices in itertools.permutations(
np.arange(target_value.shape[0]))
]
metric_value = best_function(metric_values)
metric = {'variable': return_variable_name,
'metric': metric_name,
'value': metric_value
}
return metric
return custom_metric_function
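# Numeric sketch (illustration only) of the label-switching problem this
# helper addresses: [0.3, 0.7] matches the target [0.7, 0.3] exactly under
# the swapped labeling, so the best (min) MSE over permutations is 0.0 even
# though the direct MSE is 0.16.
def _example_best_permutation_mse():
    import itertools
    mse = construct_metric_function('mse')
    value = np.array([0.3, 0.7])
    target = np.array([0.7, 0.3])
    return min(
        mse(value, target[np.array(perm)])
        for perm in itertools.permutations(np.arange(2))
    )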
def best_double_permutation_metric_function_parameter(parameter_name,
target_value, metric_name, return_variable_name=None, best_function=max):
""" Select `best' metric across all permutations of first and second index
Args:
        parameter_name (string): attribute in sampler.parameters
(e.g. pi) with sampler.num_states # different versions
target_value (np.ndarray): target value
metric_name (string): name of a metric function
* 'mse': mean squared error
* 'mae': mean absolute error
(see construct_metric_function)
        return_variable_name (string, optional): name of the returned variable
            default is `parameter_name`
        best_function (function, optional): takes a list of doubles and
            returns the `best' one; default is max (e.g. max, min)
Returns:
A function of sampler that returns dictionary of variable, metric, value
"""
if best_function is None:
return metric_function_parameter(parameter_name, target_value,
metric_name, return_variable_name)
    if return_variable_name is None:
        return_variable_name = parameter_name
    metric_func = construct_metric_function(metric_name)
    import itertools
def custom_metric_function(sampler):
cur_parameter = getattr(sampler.parameters, parameter_name)
metric_values = [
metric_func(
cur_parameter,
target_value[np.array(permuted_indices)][:,
np.array(permuted_indices)],
)
for permuted_indices in itertools.permutations(
np.arange(target_value.shape[0]))
]
metric_value = best_function(metric_values)
metric = {'variable': return_variable_name,
'metric': metric_name,
'value': metric_value
}
return metric
return custom_metric_function
def noisy_logjoint_loglike_metric(metric_name_prefix='', **kwargs):
metric_names = [
'{0}noisy_logjoint'.format(metric_name_prefix),
'{0}noisy_loglikelihood'.format(metric_name_prefix),
]
def custom_metric_func(sampler):
res = sampler.noisy_logjoint(return_loglike=True, **kwargs)
return [
dict(
variable='sampler',
metric=metric_names[0],
value=res['logjoint'],
),
dict(
variable='sampler',
metric=metric_names[1],
value=res['loglikelihood'],
),
]
return custom_metric_func
def noisy_predictive_logjoint_loglike_metric(num_steps_ahead, kind='marginal',
metric_name_prefix='', **kwargs):
metric_names = [
'{0}{1}_pred_loglikelihood'.format(metric_name_prefix, ii)
for ii in range(num_steps_ahead+1)
]
if kind=='pf':
def custom_metric_func(sampler):
res = sampler.predictive_loglikelihood(
lag=num_steps_ahead,
kind=kind,
**kwargs)
return [
dict(
variable='sampler',
metric=metric_names[ii],
value=res[ii],
)
for ii in range(num_steps_ahead+1)
]
else:
def custom_metric_func(sampler):
res = sampler.predictive_loglikelihood(
lag=num_steps_ahead,
kind=kind,
**kwargs)
return [
dict(
variable='sampler',
metric=metric_names[-1],
value=res,
)
]
return custom_metric_func
def metric_compare_z(true_z):
""" Return NMI, Precision, Recall between inferred and true discrete labels
Args:
true_z (ndarray) length must match formatted observations
Most likely for AR(p) (T) -> (T-p+1)
"""
from sklearn.metrics import confusion_matrix, normalized_mutual_info_score
def metric_z_function(sampler):
pred_z = sampler.sample_z(track_samples=False)
nmi = normalized_mutual_info_score(true_z, pred_z)
cm = confusion_matrix(true_z, pred_z)
precision = np.sum(np.max(cm, axis=0))/(np.sum(cm)*1.0)
recall = np.sum(np.max(cm, axis=1))/(np.sum(cm)*1.0)
metric_list = [
dict(metric='nmi', variable='z', value=nmi),
dict(metric='precision', variable='z', value=precision),
dict(metric='recall', variable='z', value=recall),
]
return metric_list
return metric_z_function
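# Numeric sketch (illustration only) of the confusion-matrix metrics above,
# using fixed label vectors instead of a sampler:
def _example_compare_z_metrics():
    from sklearn.metrics import confusion_matrix, normalized_mutual_info_score
    true_z = np.array([0, 0, 1, 1])
    pred_z = np.array([1, 1, 0, 0])  # same clustering, permuted labels
    nmi = normalized_mutual_info_score(true_z, pred_z)   # 1.0
    cm = confusion_matrix(true_z, pred_z)                # [[0, 2], [2, 0]]
    precision = np.sum(np.max(cm, axis=0)) / np.sum(cm)  # 1.0
    recall = np.sum(np.max(cm, axis=1)) / np.sum(cm)     # 1.0
    return nmi, precision, recall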
def metric_compare_x(true_x):
""" Return RMSE, MAE between inferred and true latent variables
Args:
true_x (ndarray)
"""
def metric_x_function(sampler):
pred_x = sampler.sample_x(track_samples=False)
rmse = np.sqrt(np.mean((true_x - pred_x)**2))
logmse = np.log10(np.mean((true_x - pred_x)**2))
mae = np.mean(np.abs(true_x - pred_x))
metric_list = [
dict(metric='rmse', variable='x', value=rmse),
dict(metric='mae', variable='x', value=mae),
dict(metric='logmse', variable='x', value=logmse),
]
return metric_list
return metric_x_function
| 17,322 | 36.577007 | 81 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/__init__.py | from .sgmcmc_sampler import SGMCMCSampler, SGMCMCHelper
from .evaluator import SamplerEvaluator
| 96 | 31.333333 | 55 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/sgmcmc_sampler.py | import numpy as np
import pandas as pd
import time
from datetime import timedelta
import logging
from .evaluator import BaseEvaluator
logger = logging.getLogger(name=__name__)
NOISE_NUGGET=1e-9
# SGMCMCSampler
class SGMCMCSampler(object):
""" Base Class for SGMCMC with Time Series """
def __init__(self, **kwargs):
raise NotImplementedError()
## Init Functions
def setup(self, **kwargs):
        # Deprecated
raise NotImplementedError()
def prior_init(self):
self.parameters = self.prior.sample_prior()
return self.parameters
## Loglikelihood Functions
def exact_loglikelihood(self, tqdm=None):
""" Return the exact loglikelihood given the current parameters """
loglikelihood = self.message_helper.marginal_loglikelihood(
observations=self.observations,
parameters=self.parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
return loglikelihood
def exact_logjoint(self, return_loglike=False, tqdm=None):
""" Return the loglikelihood + logprior given the current parameters """
loglikelihood = self.exact_loglikelihood(tqdm=tqdm)
logprior = self.prior.logprior(self.parameters)
if return_loglike:
return dict(
logjoint=loglikelihood + logprior,
loglikelihood=loglikelihood,
)
else:
return loglikelihood + logprior
def predictive_loglikelihood(self, kind='marginal', num_steps_ahead=10,
subsequence_length=-1, minibatch_size=1, buffer_length=10,
num_samples=1000, parameters=None, observations=None,
**kwargs):
""" Return the predictive loglikelihood given the parameters """
if parameters is None:
parameters = self.parameters
observations = self._get_observations(observations)
T = observations.shape[0]
if kind == 'marginal':
pred_loglikelihood = 0.0
for s in range(0, minibatch_size):
out = self._random_subsequence_and_buffers(buffer_length,
subsequence_length=subsequence_length,
T=T)
forward_message = self.message_helper.forward_message(
observations[
out['left_buffer_start']:out['subsequence_start']
],
self.parameters,
forward_message=self.forward_message,
tqdm=kwargs.get('tqdm', None),
)
# Noisy Loglikelihood should use only forward pass
# E.g. log Pr(y) \approx \sum_s log Pr(y_s | y<min(s))
pred_loglikelihood_S = (
self.message_helper.predictive_loglikelihood(
observations=observations,
parameters=parameters,
forward_message=forward_message,
backward_message=self.backward_message,
lag=num_steps_ahead,
tqdm=kwargs.get('tqdm', None),
))
pred_loglikelihood += (
pred_loglikelihood_S * (T-num_steps_ahead)/(
out['subsequence_end'] - out['subsequence_start'] - \
num_steps_ahead
))
pred_loglikelihood *= 1.0/minibatch_size
return pred_loglikelihood
elif kind == 'pf':
if kwargs.get("N", None) is None:
kwargs['N'] = num_samples
pred_loglikelihood = np.zeros(num_steps_ahead+1)
for s in range(0, minibatch_size):
out = self._random_subsequence_and_buffers(
buffer_length=buffer_length,
subsequence_length=subsequence_length,
T=T)
relative_start = (out['subsequence_start'] -
out['left_buffer_start'])
relative_end = (out['subsequence_end'] -
out['left_buffer_start'])
buffer_ = observations[
out['left_buffer_start']:
out['right_buffer_end']
]
pred_loglike_add = (
self.message_helper
.pf_predictive_loglikelihood_estimate(
observations=buffer_,
parameters=self.parameters,
num_steps_ahead=num_steps_ahead,
subsequence_start=relative_start,
subsequence_end=relative_end,
**kwargs)
)
for ll in range(num_steps_ahead+1):
pred_loglikelihood[ll] += pred_loglike_add[ll] * (T-ll)/(
out['subsequence_end'] - out['subsequence_start']-ll
)
pred_loglikelihood *= 1.0/minibatch_size
return pred_loglikelihood
else:
raise ValueError("Unrecognized kind = {0}".format(kind))
def noisy_loglikelihood(self, kind='marginal',
subsequence_length=-1, minibatch_size=1, buffer_length=10,
num_samples=None, observations=None,
**kwargs):
""" Subsequence Approximation to loglikelihood
Args:
kind (string): how to estimate the loglikelihood
subsequence_length (int): length of subsequence used in evaluation
minibatch_size (int): number of subsequences
buffer_length (int): length of each subsequence buffer
"""
observations = self._get_observations(observations)
T = observations.shape[0]
noisy_loglikelihood = 0.0
if kind == 'marginal':
for s in range(0, minibatch_size):
out = self._random_subsequence_and_buffers(buffer_length,
subsequence_length=subsequence_length,
T=T)
forward_message = self.message_helper.forward_message(
observations[
out['left_buffer_start']:out['subsequence_start']
],
self.parameters,
forward_message=self.forward_message,
tqdm=kwargs.get('tqdm', None),
)
# Noisy Loglikelihood should use only forward pass
# E.g. log Pr(y) \approx \sum_s log Pr(y_s | y<min(s))
noisy_loglikelihood += (
self.message_helper.marginal_loglikelihood(
observations=observations[
out['subsequence_start']:out['subsequence_end']
],
parameters=self.parameters,
weights=out['weights'],
forward_message=forward_message,
backward_message=self.backward_message,
tqdm=kwargs.get('tqdm', None),
) - forward_message['log_constant']
)
elif kind == 'complete':
for s in range(0, minibatch_size):
out = self._random_subsequence_and_buffers(
buffer_length=buffer_length,
subsequence_length=subsequence_length,
T=T)
buffer_ = observations[
out['left_buffer_start']:out['right_buffer_end']
]
# Draw Samples:
latent_buffer = self.sample_x(
parameters=self.parameters,
observations=buffer_,
num_samples=num_samples,
)
relative_start = out['subsequence_start']-out['left_buffer_start']
relative_end = out['subsequence_end']-out['left_buffer_start']
forward_message = {}
if relative_start > 0:
forward_message = dict(
x_prev = latent_buffer[relative_start-1]
)
noisy_loglikelihood += \
self.message_helper.complete_data_loglikelihood(
observations=observations[
out['subsequence_start']:out['subsequence_end']
],
latent_vars=latent_buffer[relative_start:relative_end],
weights=out['weights'],
parameters=self.parameters,
forward_message=forward_message,
)
elif kind == 'pf':
if kwargs.get("N", None) is None:
kwargs['N'] = num_samples
noisy_loglikelihood = 0.0
for s in range(0, minibatch_size):
out = self._random_subsequence_and_buffers(
buffer_length=buffer_length,
subsequence_length=subsequence_length,
T=T)
relative_start = (out['subsequence_start'] -
out['left_buffer_start'])
relative_end = (out['subsequence_end'] -
out['left_buffer_start'])
buffer_ = observations[
out['left_buffer_start']:
out['right_buffer_end']
]
noisy_loglikelihood += (
self.message_helper
.pf_loglikelihood_estimate(
observations=buffer_,
parameters=self.parameters,
weights=out['weights'],
subsequence_start=relative_start,
subsequence_end=relative_end,
**kwargs)
)
else:
raise ValueError("Unrecognized kind = {0}".format(kind))
noisy_loglikelihood *= 1.0/minibatch_size
if np.isnan(noisy_loglikelihood):
raise ValueError("NaNs in loglikelihood")
return noisy_loglikelihood
def noisy_logjoint(self, return_loglike=False, **kwargs):
""" Return the loglikelihood + logprior given the current parameters """
loglikelihood = self.noisy_loglikelihood(**kwargs)
logprior = self.prior.logprior(self.parameters)
if return_loglike:
return dict(
logjoint=loglikelihood + logprior,
loglikelihood=loglikelihood,
)
else:
return loglikelihood + logprior
## Gradient Functions
def _random_subsequence_and_buffers(self, buffer_length,
subsequence_length, T=None):
""" Get a subsequence and the forward and backward message approx"""
if T is None:
T = self._get_T()
if buffer_length == -1:
buffer_length = T
if (subsequence_length == -1) or (T-subsequence_length <= 0):
subsequence_start = 0
subsequence_end = T
weights = None
else:
subsequence_start, subsequence_end, weights = \
random_subsequence_and_weights(
S=subsequence_length,
T=T,
partition_style=self.options.get('partition_style'),
)
left_buffer_start = max(0, subsequence_start - buffer_length)
right_buffer_end = min(T, subsequence_end + buffer_length)
out = dict(
subsequence_start = subsequence_start,
subsequence_end = subsequence_end,
left_buffer_start = left_buffer_start,
right_buffer_end = right_buffer_end,
weights = weights,
)
return out
def _single_noisy_grad_loglikelihood(self, buffer_dict, kind='marginal',
num_samples=None, observations=None, parameters=None, **kwargs):
# buffer_dict is the output of _random_subsequence_and_buffers
observations = self._get_observations(observations, check_shape=False)
if parameters is None:
parameters = self.parameters
T = observations.shape[0]
if kind == 'marginal':
forward_message = self.message_helper.forward_message(
observations[
buffer_dict['left_buffer_start']:
buffer_dict['subsequence_start']
],
parameters,
forward_message=self.forward_message)
backward_message = self.message_helper.backward_message(
observations[
buffer_dict['subsequence_end']:
buffer_dict['right_buffer_end']
],
parameters,
backward_message=self.backward_message,
)
noisy_grad = (
self.message_helper
.gradient_marginal_loglikelihood(
observations=observations[
buffer_dict['subsequence_start']:
buffer_dict['subsequence_end']
],
parameters=parameters,
weights=buffer_dict['weights'],
forward_message=forward_message,
backward_message=backward_message,
**kwargs
)
)
elif kind == 'complete':
buffer_ = observations[
buffer_dict['left_buffer_start']:
buffer_dict['right_buffer_end']
]
# Draw Samples:
latent_buffer = self.sample_x(
parameters=parameters,
observations=buffer_,
num_samples=num_samples,
)
relative_start = (buffer_dict['subsequence_start'] -
buffer_dict['left_buffer_start'])
relative_end = (buffer_dict['subsequence_end'] -
buffer_dict['left_buffer_start'])
forward_message = {}
if relative_start > 0:
forward_message = dict(
x_prev = latent_buffer[relative_start-1]
)
noisy_grad = (
self.message_helper
.gradient_complete_data_loglikelihood(
observations=observations[
buffer_dict['subsequence_start']:
buffer_dict['subsequence_end']
],
latent_vars=latent_buffer[relative_start:relative_end],
parameters=parameters,
weights=buffer_dict['weights'],
forward_message=forward_message,
**kwargs)
)
elif kind == 'pf':
if kwargs.get("N", None) is None:
kwargs['N'] = num_samples
relative_start = (buffer_dict['subsequence_start'] -
buffer_dict['left_buffer_start'])
relative_end = (buffer_dict['subsequence_end'] -
buffer_dict['left_buffer_start'])
buffer_ = observations[
buffer_dict['left_buffer_start']:
buffer_dict['right_buffer_end']
]
noisy_grad = (
self.message_helper
.pf_gradient_estimate(
observations=buffer_,
parameters=self.parameters,
subsequence_start=relative_start,
subsequence_end=relative_end,
weights=buffer_dict['weights'],
**kwargs)
)
else:
raise ValueError("Unrecognized kind = {0}".format(kind))
return noisy_grad
def _noisy_grad_loglikelihood(self,
subsequence_length=-1, minibatch_size=1, buffer_length=0,
observations=None, buffer_dicts=None, **kwargs):
observations = self._get_observations(observations, check_shape=False)
T = observations.shape[0]
if buffer_dicts is None:
buffer_dicts = [
self._random_subsequence_and_buffers(
buffer_length=buffer_length,
subsequence_length=subsequence_length,
T=T)
for _ in range(minibatch_size)
]
elif len(buffer_dicts) != minibatch_size:
raise ValueError("len(buffer_dicts != minibatch_size")
noisy_grad = {var: np.zeros_like(value)
for var, value in self.parameters.as_dict().items()}
for s in range(0, minibatch_size):
noisy_grad_add = self._single_noisy_grad_loglikelihood(
buffer_dict=buffer_dicts[s],
observations=observations,
**kwargs,
)
for var in noisy_grad:
noisy_grad[var] += noisy_grad_add[var] * 1.0/minibatch_size
if np.any(np.isnan(noisy_grad[var])):
raise ValueError("NaNs in gradient of {0}".format(var))
if np.linalg.norm(noisy_grad[var]) > 1e16:
logger.warning("Norm of noisy_grad_loglike[{1} > 1e16: {0}".format(
noisy_grad[var], var))
return noisy_grad
def noisy_gradient(self, preconditioner=None, is_scaled=True, **kwargs):
""" Noisy Gradient Estimate
noisy_gradient = -grad tilde{U}(theta)
= grad marginal loglike + grad logprior
Monte Carlo Estimate of gradient (using buffering)
Args:
preconditioner (object): preconditioner for gradients
is_scaled (boolean): scale gradient by 1/T
**kwargs: arguments for `self._noisy_grad_loglikelihood()`
For example: minibatch_size, buffer_length, use_analytic
Returns:
noisy_gradient (dict): dict of gradient vectors
"""
noisy_grad_loglike = \
self._noisy_grad_loglikelihood(**kwargs)
noisy_grad_prior = self.prior.grad_logprior(
parameters=kwargs.get('parameters',self.parameters),
**kwargs
)
noisy_gradient = {var: noisy_grad_prior[var] + noisy_grad_loglike[var]
for var in noisy_grad_prior}
if preconditioner is None:
if is_scaled:
for var in noisy_gradient:
noisy_gradient[var] /= self._get_T(**kwargs)
else:
scale = 1.0/self._get_T(**kwargs) if is_scaled else 1.0
noisy_gradient = preconditioner.precondition(noisy_gradient,
parameters=kwargs.get('parameters',self.parameters),
scale=scale)
return noisy_gradient
## Sampler/Optimizer Step Functions
def step_sgd(self, epsilon, **kwargs):
""" One step of Stochastic Gradient Descent
(Learns the MAP, not a sample from the posterior)
Args:
epsilon (double): step size
**kwargs (kwargs): to pass to self.noisy_gradient
minibatch_size (int): number of subsequences to sample from
buffer_length (int): length of buffer to use
Returns:
parameters (Parameters): sampled parameters after one step
"""
delta = self.noisy_gradient(**kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += epsilon * delta[var]
return self.parameters
def step_precondition_sgd(self, epsilon, preconditioner, **kwargs):
""" One Step of Preconditioned Stochastic Gradient Descent
Args:
epsilon (double): step size
preconditioner (object): preconditioner
**kwargs (kwargs): to pass to self.noisy_gradient
minibatch_size (int): number of subsequences to sample from
buffer_length (int): length of buffer to use
Returns:
parameters (Parameters): sampled parameters after one step
"""
delta = self.noisy_gradient(preconditioner=preconditioner, **kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += epsilon * delta[var]
return self.parameters
def step_adagrad(self, epsilon, **kwargs):
""" One step of adagrad
(Learns the MAP, not a sample from the posterior)
Args:
epsilon (double): step size
**kwargs (kwargs): to pass to self.noisy_gradient
"""
if not hasattr(self, "_adagrad_moments"):
self._adagrad_moments = dict(t=0, G=0.0)
g = self.parameters.from_dict_to_vector(self.noisy_gradient(**kwargs))
t = self._adagrad_moments['t'] + 1
G = self._adagrad_moments['G'] + g**2
delta_vec = g/np.sqrt(G + NOISE_NUGGET)
delta = self.parameters.from_vector_to_dict(delta_vec,
**self.parameters.dim)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += epsilon * delta[var]
self._adagrad_moments['t'] = t
self._adagrad_moments['G'] = G
return self.parameters
def _get_sgmcmc_noise(self, is_scaled=True, preconditioner=None,
**kwargs):
if is_scaled:
scale = 1.0 / self._get_T(**kwargs)
else:
scale = 1.0
if preconditioner is not None:
white_noise = preconditioner.precondition_noise(
parameters=self.parameters,
scale=scale,
)
else:
white_noise = {var: np.random.normal(
loc=0,
scale=np.sqrt(scale),
size=value.shape
) for var, value in self.parameters.as_dict().items()}
return white_noise
def sample_sgld(self, epsilon, **kwargs):
""" One Step of Stochastic Gradient Langevin Dynamics
Args:
epsilon (double): step size
**kwargs (kwargs): to pass to self.noisy_gradient
Returns:
parameters (Parameters): sampled parameters after one step
"""
if "preconditioner" in kwargs:
raise ValueError("Use SGRLD instead")
delta = self.noisy_gradient(**kwargs)
white_noise = self._get_sgmcmc_noise(**kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += \
epsilon * delta[var] + np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
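    # Usage sketch (hypothetical step sizes; kept as comments so the class
    # definition itself is unchanged):
    #     for it in range(num_iters):
    #         sampler.sample_sgld(epsilon=0.01 / (1.0 + it) ** 0.33,
    #                             subsequence_length=16, buffer_length=8,
    #                             minibatch_size=4)
    #         sampler.project_parameters()
    # Each call applies theta += epsilon * grad + sqrt(2 * epsilon) * noise,
    # with grad and noise scaled by 1/T (is_scaled=True by default).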
def sample_sgld_cv(self, epsilon, centering_parameters, centering_gradient,
**kwargs):
""" One Step of Stochastic Gradient Langevin Dynamics with Control Variates
grad = full_gradient(centering_parameters) + \
sub_gradient(parameters) - sub_gradient(centering_gradient)
Args:
epsilon (double): step size
centering_parameters (Parameters): centering parameters
centering_gradient (dict): full data grad of centering_parameters
**kwargs (kwargs): to pass to self.noisy_gradient
Returns:
parameters (Parameters): sampled parameters after one step
"""
if "preconditioner" in kwargs:
raise ValueError("Use SGRLD instead")
buffer_dicts = [
self._random_subsequence_and_buffers(
buffer_length=kwargs.get('buffer_length', 0),
subsequence_length=kwargs.get('subsequence_length', -1),
T=self._get_T(**kwargs),
)
for _ in range(kwargs.get('minibatch_size', 1))
]
cur_subseq_grad = self.noisy_gradient(
buffer_dicts=buffer_dicts, **kwargs)
centering_subseq_grad = self.noisy_gradient(
parameters=centering_parameters,
buffer_dicts=buffer_dicts, **kwargs)
delta = {}
for var in cur_subseq_grad.keys():
delta[var] = centering_gradient[var] + \
cur_subseq_grad[var] - centering_subseq_grad[var]
white_noise = self._get_sgmcmc_noise(**kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += \
epsilon * delta[var] + np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
def sample_sgrld(self, epsilon, preconditioner, **kwargs):
""" One Step of Stochastic Gradient Riemannian Langevin Dynamics
theta += epsilon * (D(theta) * grad_logjoint + correction_term) + \
N(0, 2 epsilon D(theta))
Args:
epsilon (double): step size
preconditioner (object): preconditioner
Returns:
parameters (Parameters): sampled parameters after one step
"""
if kwargs.get("is_scaled", True):
scale = 1.0 / self._get_T(**kwargs)
else:
scale = 1.0
delta = self.noisy_gradient(preconditioner=preconditioner, **kwargs)
white_noise = self._get_sgmcmc_noise(
preconditioner=preconditioner, **kwargs)
correction = preconditioner.correction_term(
self.parameters, scale=scale)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += \
epsilon * (delta[var] + correction[var]) + \
np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
def sample_gibbs(self):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (Parameters): sampled parameters after one step
"""
raise NotImplementedError()
def project_parameters(self, **kwargs):
""" Project parameters to valid values + fix constants
See **kwargs in __init__ for more details
"""
self.parameters.project_parameters(**self.options, **kwargs)
return self.parameters
## Fit Functions
def fit(self, iter_type, num_iters, output_all=False, observations=None,
init_parameters=None, tqdm=None, catch_interrupt=False, **kwargs):
""" Run multiple learning / inference steps
Args:
iter_type (string):
'SGD', 'ADAGRAD', 'SGLD', 'SGRLD', 'Gibbs', etc.
num_iters (int): number of steps
output_all (bool): whether to output each iteration's parameters
observations (ndarray): observations to fit on, optional
init_parameters (Parameters): initial parameters, optional
tqdm (tqdm): progress bar wrapper
catch_interrupt (bool): terminate early on Ctrl-C
**kwargs: for each iter
e.g. steps_per_iter, epsilon, minibatch_size,
subsequence_length, buffer_length,
preconditioner, pf_kwargs, etc.
see documentation for get_iter_step()
Returns: (depends on output_all arg)
parameters (Parameters):
parameters_list (list of Parameters): length num_iters+1
"""
if observations is not None:
self.observations = observations
if init_parameters is not None:
self.parameters = init_parameters.copy()
iter_func_names, iter_func_kwargs = \
self.get_iter_step(iter_type, tqdm=tqdm, **kwargs)
if output_all:
parameters_list = [None]*(num_iters+1)
parameters_list[0] = self.parameters.copy()
# Fit Loop
pbar = range(1, num_iters+1)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("fit using {0} iters".format(iter_type))
for it in pbar:
# Run iter funcs
try:
for func_name, func_kwargs in zip(iter_func_names,
iter_func_kwargs):
getattr(self, func_name)(**func_kwargs)
if output_all:
parameters_list[it] = self.parameters.copy()
except KeyboardInterrupt as e:
if catch_interrupt:
logger.warning("Interrupt in fit:\n{0}\n".format(e) + \
"Stopping early after {0} iters".format(it))
if output_all:
return parameters_list[:it]
else:
return self.parameters.copy()
else:
raise e
if output_all:
return parameters_list
else:
return self.parameters.copy()
def fit_timed(self, iter_type, max_time=60, min_save_time=1,
observations=None, init_parameters=None, tqdm=None, tqdm_iter=False,
catch_interrupt=False,
**kwargs):
""" Run multiple learning / inference steps
Args:
iter_type (string):
'SGD', 'ADAGRAD', 'SGLD', 'SGRLD', 'Gibbs', etc.
            max_time (float): maximum time in seconds to run fit
min_save_time (float): min time between saved parameters
observations (ndarray): observations to fit on, optional
init_parameters (Parameters): initial parameters, optional
tqdm (tqdm): progress bar wrapper
catch_interrupt (bool): terminate early on Ctrl-C
**kwargs: for each iter
e.g. steps_per_iter, epsilon, minibatch_size,
subsequence_length, buffer_length,
preconditioner, pf_kwargs, etc.
see documentation for get_iter_step()
Returns:
parameters_list (list of Parameters):
times (list of float): fit time for each parameter
"""
parameters_list, times, _ = self.fit_evaluate(
iter_type=iter_type,
max_time=max_time, min_save_time=min_save_time,
observations=observations, init_parameters=init_parameters,
tqdm=tqdm, tqdm_iter=tqdm_iter,
catch_interrupt=catch_interrupt,
**kwargs)
return parameters_list['parameters'].tolist(), times['time'].tolist()
def fit_evaluate(self, iter_type, metric_functions=None,
max_num_iters=None, max_time=60, min_save_time=1,
observations=None, init_parameters=None, tqdm=None, tqdm_iter=False,
catch_interrupt=False, total_max_time=None,
**kwargs):
""" Run multiple learning / inference steps with evaluator
Args:
iter_type (string):
'SGD', 'ADAGRAD', 'SGLD', 'SGRLD', 'Gibbs', etc.
metric_functions (func or list of funcs): evaluation functions
Each function takes a sampler and returns a dict or list of dict
dict(metric=string, variable=string, value=double) for each
See metric_functions.py for examples
max_num_iters (int): maximum number of iterations to save
            max_time (float): maximum time in seconds to run sampler
does *not* include time used by evaluator
min_save_time (float): min time between saved parameters
observations (ndarray): observations to fit on, optional
init_parameters (Parameters): initial parameters, optional
tqdm (tqdm): progress bar wrapper
tqdm_iter (bool): progress bar for each iteration
catch_interrupt (bool): terminate early on Ctrl-C
total_max_time (float): maximum time in seconds to run fit_evaluate
**kwargs: for each iter
e.g. steps_per_iter, epsilon, minibatch_size,
subsequence_length, buffer_length,
preconditioner, pf_kwargs, etc.
see documentation for get_iter_step()
Returns:
parameters_list (pd.DataFrame): parameters saved
columns:
iteration: number of iter_func_kwargs steps called
parameters: Parameters
times (pd.DataFrame): fit time for each saved parameter
columns:
iteration: number of iter_func_kwargs steps called
time: time used by iter_func_kwargs
metrics (pd.DataFrame): metric for each saved parameter
columns:
iteration: number of iter_func_kwargs steps called
metric: name of metric
variable: name of variable
value: value of metric for variable
"""
if observations is not None:
self.observations = observations
if init_parameters is not None:
self.parameters = init_parameters.copy()
evaluator = BaseEvaluator(
sampler=self,
metric_functions=metric_functions,
)
iter_func_names, iter_func_kwargs = \
self.get_iter_step(iter_type, tqdm=tqdm, **kwargs)
if tqdm_iter:
iter_func_kwargs[0]['tqdm'] = tqdm
        num_iters = int(max_time // min_save_time)
if max_num_iters is not None:
num_iters = min(num_iters, max_num_iters)
iteration = 0
total_time = 0
parameters_list = [None]*(num_iters+1)
times = np.zeros(num_iters+1)*np.nan
iterations = np.zeros(num_iters+1, dtype=int)
fit_start_time = time.time()
last_save_time = time.time()
parameters_list[0] = self.parameters.copy()
times[0] = total_time
iterations[0] = iteration
evaluator.eval_metric_functions(iteration=iteration)
# Fit Loop
pbar = range(1, num_iters+1)
if tqdm is not None:
pbar = tqdm(pbar)
for it in pbar:
if tqdm is not None:
pbar.set_description("fit using {0}".format(iter_type) + \
" on iter {0}".format(iteration)
)
try:
for step in range(1000):
# Run iter funcs
for func_name, func_kwargs in zip(iter_func_names,
iter_func_kwargs):
getattr(self, func_name)(**func_kwargs)
if time.time() - last_save_time > min_save_time:
parameters_list[it] = self.parameters.copy()
total_time += time.time() - last_save_time
times[it] = total_time
iteration += step + 1
iterations[it] = iteration
evaluator.eval_metric_functions(iteration=iteration)
last_save_time = time.time()
break
except KeyboardInterrupt as e:
if catch_interrupt:
logger.warning("Interrupt in fit_timed:\n{0}\n".format(e) + \
"Stopping early after {0} iters".format(it))
break
else:
raise e
if total_time > max_time:
                # Break if total time spent in iter_funcs exceeds max_time
break
            if total_max_time is not None:
                if time.time() - fit_start_time > total_max_time:
                    # Break if total time of fit_evaluate exceeds total_max_time
                    break
valid = np.sum(~np.isnan(times))
parameters_list = pd.DataFrame(dict(
iteration = iterations[0:valid],
parameters = parameters_list[0:valid],
))
times = pd.DataFrame(dict(
iteration = iterations[0:valid],
time = times[0:valid],
))
metric = evaluator.get_metrics()
return parameters_list, times, metric
def get_iter_step(self, iter_type, steps_per_iteration=1, **kwargs):
# Returns iter_func_names, iter_func_kwargs
project_kwargs = kwargs.get("project_kwargs",{})
if iter_type == 'Gibbs':
iter_func_names = ["sample_gibbs", "project_parameters"]
iter_func_kwargs = [{}, project_kwargs]
elif iter_type == 'custom':
iter_func_names = kwargs.get("iter_func_names")
iter_func_kwargs = kwargs.get("iter_func_kwargs")
elif iter_type in ['SGD', 'ADAGRAD', 'SGLD', 'SGRD', 'SGRLD']:
grad_kwargs = dict(
epsilon = kwargs['epsilon'],
subsequence_length = kwargs['subsequence_length'],
buffer_length = kwargs['buffer_length'],
minibatch_size = kwargs.get('minibatch_size', 1),
kind = kwargs.get("kind", "marginal"),
num_samples = kwargs.get("num_samples", None),
**kwargs.get("pf_kwargs", {})
)
if 'num_sequences' in kwargs:
grad_kwargs['num_sequences'] = kwargs['num_sequences']
if 'use_scir' in kwargs:
grad_kwargs['use_scir'] = kwargs['use_scir']
if iter_type == 'SGD':
iter_func_names = ['step_sgd', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'ADAGRAD':
iter_func_names = ['step_adagrad', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'SGLD':
iter_func_names = ['sample_sgld', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'SGRD':
grad_kwargs['preconditioner'] = self._get_preconditioner(
kwargs.get('preconditioner')
)
iter_func_names = ['step_precondition_sgd', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'SGRLD':
grad_kwargs['preconditioner'] = self._get_preconditioner(
kwargs.get('preconditioner')
)
iter_func_names = ['sample_sgrld', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
else:
raise ValueError("Unrecognized iter_type {0}".format(iter_type))
iter_func_names = iter_func_names * steps_per_iteration
iter_func_kwargs = iter_func_kwargs * steps_per_iteration
return iter_func_names, iter_func_kwargs
def _get_preconditioner(self, preconditioner=None):
if preconditioner is None:
raise NotImplementedError("No Default Preconditioner for {}".format(
self.name))
return preconditioner
## Predict Functions
def predict(self, target='latent', distr=None, lag=None,
return_distr=None, num_samples=None,
kind='analytic', observations=None, parameters=None,
**kwargs):
""" Make predictions based on fit
Args:
target (string): variable to predict
'latent' - latent variables
'y' - observation variables
distr (string): what distribution to sample/target
'marginal' - marginal (default for return_distr)
'joint' - joint (default for sampling)
lag (int): distribution is p(U_t | Y_{1:t+lag})
default/None -> use all observations
return_distr (bool): return distribution
(default is True if num_samples is None otherwise True)
num_samples (int): number of samples return
kind (string): how to calculate distribution
'analytic' - use message passing
'pf' - use particle filter/smoother
observations (ndarray): observations to use
parameters (Parameters): parameters
kwargs: key word arguments
tqdm (tqdm): progress bar
see message_helper.latent_var_distr,
message_helper.y_distr,
message_helper.latent_var_sample,
message_helper.y_sample,
message_helper.pf_latent_var_distr,
message_helper.pf_y_distr,
for more details
Returns:
Depends on target, return_distr, num_samples
"""
observations = self._get_observations(observations)
if parameters is None:
parameters = self.parameters
if return_distr is None:
if kind == 'pf':
return_distr = True
else:
return_distr = (num_samples is None)
if kind == 'analytic':
if return_distr:
if distr is None:
distr = 'marginal'
if target == 'latent':
return self.message_helper.latent_var_distr(
distr=distr,
lag=lag,
observations=observations,
parameters=parameters,
**kwargs,
)
elif target == 'y':
return self.message_helper.y_distr(
distr=distr,
lag=lag,
observations=observations,
parameters=parameters,
**kwargs,
)
else:
raise ValueError("Unrecognized target '{0}'".format(target))
else:
if distr is None:
distr = 'joint'
if target == 'latent':
return self.message_helper.latent_var_sample(
distr=distr,
lag=lag,
num_samples=num_samples,
observations=observations,
parameters=parameters,
**kwargs,
)
elif target == 'y':
return self.message_helper.y_sample(
distr=distr,
lag=lag,
num_samples=num_samples,
observations=observations,
parameters=parameters,
**kwargs,
)
else:
raise ValueError("Unrecognized target '{0}'".format(target))
elif kind == 'pf':
if return_distr:
if target == 'latent':
return self.message_helper.pf_latent_var_distr(
lag=lag,
observations=observations,
parameters=parameters,
**kwargs,
)
elif target == 'y':
return self.message_helper.pf_y_distr(
distr=distr,
lag=lag,
observations=observations,
parameters=parameters,
**kwargs,
)
else:
raise ValueError("Unrecognized target '{0}'".format(target))
else:
raise ValueError("return_distr must be True for kind = pf")
else:
raise ValueError("Unrecognized kind == '{0}'".format(kind))
def simulate(self, T, init_message=None,
return_distr=False, num_samples=None,
kind='analytic', observations=None, parameters=None,
**kwargs):
""" Simulate dynamics
Args:
T (int): length of simulated data
init_message (dict): initial forward message
return_distr (bool): return distribution (default is False)
            num_samples (int): number of samples to return
kind (string): how to calculate distribution
'analytic' - use message passing
'pf' - use particle filter/smoother
observations (ndarray): observations
parameters (Parameters): parameters
Returns:
dict with key values depending on return_distr and num_samples
latent_vars (ndarray): simulated latent vars
observations (ndarray): simulated observations
latent_mean/latent_prob/latent_cov
            observation_mean/observation_prob/observation_cov
"""
observations = self._get_observations(observations)
if parameters is None:
parameters = self.parameters
if kind == 'analytic':
if init_message is None:
init_message = self.message_helper.forward_message(
observations=observations,
parameters=parameters,
)
if return_distr:
return self.message_helper.simulate_distr(
T=T,
parameters=parameters,
init_message=init_message,
**kwargs
)
else:
return self.message_helper.simulate(
T=T,
parameters=parameters,
init_message=init_message,
num_samples=num_samples,
**kwargs
)
elif kind == 'pf':
raise NotImplementedError()
else:
raise ValueError("Unrecognized kind == '{0}'".format(kind))
## Attributes + Misc Helper Functions
@property
def observations(self):
return self._observations
@observations.setter
def observations(self, observations):
self._check_observation_shape(observations)
self._observations = observations
return
def _check_observation_shape(self, observations):
return
def _get_observations(self, observations, check_shape=True):
if observations is None:
observations = self.observations
if observations is None:
raise ValueError("observations not specified")
elif check_shape:
self._check_observation_shape(observations)
return observations
def _get_T(self, **kwargs):
T = kwargs.get('T')
if T is None:
observations = kwargs.get('observations')
observations = self._get_observations(observations)
T = observations.shape[0]
return T
# SeqSGMCMCSampler
class SeqSGMCMCSampler(object):
""" Mixin for handling a list of sequences """
def _get_T(self, **kwargs):
T = kwargs.get('T')
if T is None:
observations = kwargs.get('observations')
observations = self._get_observations(observations)
            T = sum(np.shape(observation)[0] for observation in observations)
return T
def _check_observation_shape(self, observations):
if observations is not None:
for ii, observation in enumerate(observations):
try:
super()._check_observation_shape(observations=observation)
except ValueError as e:
raise ValueError("Error in observations[{0}] :\n{1}".format(
ii, e))
def exact_loglikelihood(self, observations=None, tqdm=None):
""" Return exact loglikelihood over all observation sequences """
observations = self._get_observations(observations)
loglikelihood = 0
pbar = observations
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("Seq Loglikelihood")
for observation in pbar:
loglikelihood += self.message_helper.marginal_loglikelihood(
observations=observation,
parameters=self.parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
return loglikelihood
def noisy_loglikelihood(self, num_sequences=-1, observations=None,
tqdm=None, **kwargs):
""" Subsequence Approximation to loglikelihood
Args:
num_sequences (int): how many observation sequences to use
(default = -1) is to use all observation sequences
"""
observations = self._get_observations(observations)
loglikelihood = 0
S = 0.0
sequence_indices = np.arange(len(observations))
if num_sequences != -1:
sequence_indices = np.random.choice(
sequence_indices, num_sequences, replace=False,
)
pbar = sequence_indices
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("Seq Loglikelihood")
for sequence_index in pbar:
S += observations[sequence_index].shape[0]
loglikelihood += super().noisy_loglikelihood(
observations=observations[sequence_index],
tqdm=tqdm,
**kwargs)
if num_sequences != -1:
loglikelihood *= self._get_T(**kwargs) / S
return loglikelihood
def predictive_loglikelihood(self, num_sequences=-1, observations=None,
tqdm=None, **kwargs):
""" Return the predictive loglikelihood given the parameters """
observations = self._get_observations(observations)
predictive_loglikelihood = 0
S = 0.0
sequence_indices = np.arange(len(observations))
if num_sequences != -1:
sequence_indices = np.random.choice(
sequence_indices, num_sequences, replace=False,
)
pbar = sequence_indices
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("Seq Pred Loglikelihood")
for sequence_index in pbar:
S += observations[sequence_index].shape[0]
predictive_loglikelihood += super().predictive_loglikelihood(
observations=observations[sequence_index],
tqdm=tqdm,
**kwargs)
if num_sequences != -1:
predictive_loglikelihood *= self._get_T(**kwargs) / S
return predictive_loglikelihood
def _noisy_grad_loglikelihood(self, num_sequences=-1, **kwargs):
""" Subsequence approximation to gradient of loglikelihood
Args:
num_sequences (int): how many observation sequences to use
(default = -1) is to use all observation sequences
"""
noisy_grad_loglike = None
S = 0.0
sequence_indices = np.arange(len(self.observations))
if num_sequences != -1:
sequence_indices = np.random.choice(
sequence_indices, num_sequences, replace=False,
)
for sequence_index in sequence_indices:
noisy_grad_index = super()._noisy_grad_loglikelihood(
observations=self.observations[sequence_index],
**kwargs)
S += self.observations[sequence_index].shape[0]
if noisy_grad_loglike is None:
noisy_grad_loglike = {var: noisy_grad_index[var]
for var in noisy_grad_index.keys()
}
else:
noisy_grad_loglike = {
var: noisy_grad_loglike[var] + noisy_grad_index[var]
for var in noisy_grad_index.keys()
}
if num_sequences != -1:
noisy_grad_loglike = {
var: noisy_grad_loglike[var] * self._get_T(**kwargs) / S
for var in noisy_grad_index.keys()
}
return noisy_grad_loglike
def predict(self, target='latent', distr=None, lag=None,
return_distr=None, num_samples=None,
kind='analytic', observations=None, parameters=None,
tqdm=None,
**kwargs):
""" Make predictions based on fit
Args:
target (string): variable to predict
'latent' - latent variables
'y' - observation variables
distr (string): what distribution to sample/target
'marginal' - marginal (default for return_distr)
'joint' - joint (default for sampling)
lag (int): distribution is p(U_t | Y_{1:t+lag})
default/None -> use all observations
return_distr (bool): return distribution
                (default is True if num_samples is None, otherwise False)
            num_samples (int): number of samples to return
kind (string): how to calculate distribution
'analytic' - use message passing
'pf' - use particle filter/smoother
observations (list of ndarray): observations to use
parameters (Parameters): parameters
kwargs: key word arguments
tqdm (tqdm): progress bar
see message_helper.latent_var_distr,
message_helper.y_distr,
message_helper.latent_var_sample,
message_helper.y_sample,
message_helper.pf_latent_var_distr,
message_helper.pf_y_distr,
for more details
Returns:
Depends on target, return_distr, num_samples
"""
observations = self._get_observations(observations)
if parameters is None:
parameters = self.parameters
if return_distr is None:
if kind == 'pf':
return_distr = True
else:
return_distr = (num_samples is None)
output = []
if tqdm is not None:
kwargs['tqdm'] = tqdm
observations = tqdm(observations, desc='sequence #')
if kind == 'analytic':
if return_distr:
if distr is None:
distr = 'marginal'
if target == 'latent':
for observation in observations:
output.append(
self.message_helper.latent_var_distr(
distr=distr,
lag=lag,
observations=observation,
parameters=parameters,
**kwargs,
)
)
elif target == 'y':
for observation in observations:
output.append(
self.message_helper.y_distr(
distr=distr,
lag=lag,
observations=observation,
parameters=parameters,
**kwargs,
)
)
else:
raise ValueError("Unrecognized target '{0}'".format(target))
else:
if distr is None:
distr = 'joint'
if target == 'latent':
for observation in observations:
output.append(
self.message_helper.latent_var_sample(
distr=distr,
lag=lag,
num_samples=num_samples,
observations=observation,
parameters=parameters,
**kwargs,
)
)
elif target == 'y':
for observation in observations:
output.append(
self.message_helper.y_sample(
distr=distr,
lag=lag,
num_samples=num_samples,
observations=observation,
parameters=parameters,
**kwargs,
)
)
else:
raise ValueError("Unrecognized target '{0}'".format(target))
elif kind == 'pf':
if return_distr:
if target == 'latent':
for observation in observations:
output.append(
self.message_helper.pf_latent_var_distr(
lag=lag,
observations=observation,
parameters=parameters,
**kwargs,
)
)
elif target == 'y':
for observation in observations:
output.append(
self.message_helper.pf_y_distr(
distr=distr,
lag=lag,
observations=observation,
parameters=parameters,
**kwargs,
)
)
else:
raise ValueError("Unrecognized target '{0}'".format(target))
else:
raise ValueError("return_distr must be True for kind = pf")
else:
raise ValueError("Unrecognized kind == '{0}'".format(kind))
return output
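# Usage pattern for this mixin (see e.g. SeqARPHMMSampler in
# models/arphmm/sampler.py): list SeqSGMCMCSampler first so that its
# sequence-aware overrides take precedence in the MRO, e.g.
#
#   class SeqMySampler(SeqSGMCMCSampler, MySampler):
#       pass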
# SGMCMC Helper
class SGMCMCHelper(object):
""" Base Class for SGMCMC Helper """
def __init__(self, **kwargs):
raise NotImplementedError()
## Message Passing Functions
def forward_message(self, observations, parameters, forward_message=None,
**kwargs):
""" Calculate forward messages over the observations
Pr(u_t | y_{<=t}) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
forward_message (dict): latent state prior Pr(u_{-1} | y_{<=-1})
Returns:
forward_message (dict): same format as forward_message
"""
if forward_message is None:
forward_message = self.default_forward_message
if np.shape(observations)[0] == 0: return forward_message
forward_message = self._forward_messages(
observations=observations,
parameters=parameters,
forward_message=forward_message,
only_return_last=True,
**kwargs
)
return forward_message
def backward_message(self, observations, parameters, backward_message=None,
**kwargs):
""" Calculate backward messages over the observations
Pr(y_{>t} | u_t) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
backward_message (dict): backward message Pr(y_{>T-1} | u_{T-1})
Returns:
            backward_message (dict): same format as backward_message
"""
if backward_message is None:
backward_message = self.default_backward_message
if np.shape(observations)[0] == 0: return backward_message
backward_message = self._backward_messages(
observations=observations,
parameters=parameters,
backward_message=backward_message,
only_return_last=True,
**kwargs
)
return backward_message
def forward_pass(self, observations, parameters,
forward_message=None, include_init_message=False, **kwargs):
""" Calculate forward messages over the observations
Pr(u_t | y_{<=t}) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
forward_message (dict): latent state prior Pr(u_{-1} | y_{<=-1})
            include_init_message (boolean): whether to include t = -1
Returns:
forward_messages (list of dict): same format as forward_message
"""
if forward_message is None:
forward_message = self.default_forward_message
if np.shape(observations)[0] == 0:
if include_init_message:
return [forward_message]
else:
return []
forward_messages = self._forward_messages(
observations=observations,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
if include_init_message:
return forward_messages
else:
return forward_messages[1:]
def backward_pass(self, observations, parameters,
backward_message=None, include_init_message=False, **kwargs):
""" Calculate backward message over the observations
Pr(y_{>t} | u_t) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
backward_message (dict): backward message Pr(y_{>T-1} | u_{T-1})
            include_init_message (boolean): whether to include t = -1
Returns:
backward_messages (list of dict): same format as backward_message
"""
if backward_message is None:
backward_message = self.default_backward_message
if np.shape(observations)[0] == 0:
if include_init_message:
return [backward_message]
else:
return []
backward_messages = self._backward_messages(
observations=observations,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
if include_init_message:
return backward_messages
else:
return backward_messages[1:]
def _forward_messages(self, observations, parameters, forward_message,
weights=None, only_return_last=False, **kwargs):
raise NotImplementedError()
def _backward_messages(self, observations, parameters, backward_message,
weights=None, only_return_last=False, **kwargs):
raise NotImplementedError()
def _forward_message(self, observations, parameters, forward_message,
**kwargs):
return self._forward_messages(observations, parameters, forward_message,
only_return_last=True, **kwargs)
def _backward_message(self, observations, parameters, backward_message,
**kwargs):
return self._backward_messages(observations, parameters,
backward_message, only_return_last=True, **kwargs)
## Loglikelihood Functions
def marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None):
""" Calculate the marginal loglikelihood Pr(y | theta)
Args:
observations (ndarray): observations
parameters (Parameters): parameters
forward_message (dict): latent state forward message
backward_message (dict): latent state backward message
weights (ndarray): optional, weights for loglikelihood calculation
Returns:
marginal_loglikelihood (float): marginal loglikelihood
"""
raise NotImplementedError()
def predictive_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, lag=1):
""" Calculate the predictive loglikelihood
            pred_loglikelihood = sum_t log Pr(y_{t+lag} | y_{<=t}, theta)
Args:
observations (ndarray): observations
parameters (Parameters): parameters
forward_message (dict): latent state forward message
backward_message (dict): latent state backward message
lag (int): how many steps ahead to predict
Returns:
pred_loglikelihood (float): predictive loglikelihood
"""
raise NotImplementedError()
def complete_data_loglikelihood(self, observations, latent_vars, parameters,
forward_message=None, weights=None, **kwargs):
""" Calculate the complete data loglikelihood Pr(y, u | theta)
Args:
observations (ndarray): observations
latent_vars (ndarray): latent vars
parameters (Parameters): parameters
forward_message (dict): latent state forward message
weights (ndarray): optional, weights for loglikelihood calculation
Returns:
complete_data_loglikelihood (float): complete data loglikelihood
"""
raise NotImplementedError()
## Gradient Functions
def gradient_marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, weights=None, **kwargs):
""" Gradient Calculation
Gradient of log Pr(y_[0:T) | y_<0, y_>=T, parameters)
Args:
observations (ndarray): num_obs observations
parameters (Parameters): parameters
forward_message (dict): Pr(u_-1, y_<0 | parameters)
backward_message (dict): Pr(y_>T | u_T, parameters)
weights (ndarray): how to weight terms
Returns
grad (dict): grad of variables in parameters
"""
raise NotImplementedError()
def gradient_complete_data_loglikelihood(self, observations, latent_vars,
parameters, forward_message=None, weights=None, **kwargs):
""" Gradient Calculation
Gradient of log Pr(y_[0:T), u_[0:T) | y_<0, parameters)
Args:
observations (ndarray): num_obs observations
latent_vars (ndarray): num_obs latent vars
parameters (Parameters): parameters
forward_message (dict): Pr(u_-1, y_<0 | parameters)
weights (ndarray): how to weight terms
Returns
grad (dict): grad of variables in parameters
"""
raise NotImplementedError()
## Gibbs Functions
def parameters_gibbs_sample(self, observations, latent_vars, prior,
**kwargs):
""" Gibbs sample parameters based on data
Samples parameters from the posterior conditional distribution
theta ~ Pr(theta | y, u)
Args:
observations (ndarray): num_obs observations
latent_vars (ndarray): num_obs latent variables
prior (prior): prior
Returns
sample_parameters (parameters): sampled parameters
"""
sufficient_stat = self.calc_gibbs_sufficient_statistic(
observations, latent_vars, **kwargs,
)
sample_parameters = prior.sample_posterior(
sufficient_stat, **kwargs,
)
return sample_parameters
def calc_gibbs_sufficient_statistic(self, observations, latent_vars,
**kwargs):
""" Gibbs Sample Sufficient Statistics
Args:
observations (ndarray): num_obs observations
latent_vars (ndarray): latent vars
Returns:
sufficient_stat (dict of dict)
keys are parameter
values are dict for parameter's sufficient statistics
"""
raise NotImplementedError()
## Predict Functions
def latent_var_distr(self, observations, parameters,
distr='marginal', lag=None,
forward_message=None, backward_message=None,
tqdm=None, **kwargs):
""" Sample latent vars distribution conditional on observations
Returns distribution for (u_t | y_{<= t+lag}, theta)
Args:
observations (ndarray): observations
parameters (LGSSMParameters): parameters
lag (int): what observations to condition on, None = all
forward_message (dict): forward message
backward_message (dict): backward message
Returns:
Depends on latent var type, Gaussian -> mean, cov; Discrete -> prob
"""
raise NotImplementedError()
def latent_var_sample(self, observations, parameters,
distr='joint', lag=None, num_samples=None,
forward_message=None, backward_message=None,
include_init=False, tqdm=None, **kwargs):
""" Sample latent vars conditional on observations
Samples u_t ~ u_t | y_{<= t+lag}, theta
Args:
observations (ndarray): observations
parameters (LGSSMParameters): parameters
lag (int): what observations to condition on, None = all
            num_samples (int, optional): number of samples
forward_message (dict): forward message
backward_message (dict): backward message
include_init (bool, optional): whether to sample u_{-1} | y
Returns:
sampled_latent_vars : shape depends on num_samples parameters
last dimension is num_samples
"""
raise NotImplementedError()
def y_distr(self, observations, parameters,
distr='marginal', lag=None,
forward_message=None, backward_message=None,
latent_var=None, tqdm=None, **kwargs):
""" Sample observation distribution conditional on observations
Returns distribution for (y_t* | y_{<= t+lag}, theta)
Args:
observations (ndarray): observations
parameters (LGSSMParameters): parameters
lag (int): what observations to condition on, None = all
forward_message (dict): forward message
backward_message (dict): backward message
latent_var (ndarray): latent vars
if provided, will return (y_t* | u_t, theta) instead
Returns:
Depends on observation type, Gaussian -> mean, cov; Discrete -> prob
"""
raise NotImplementedError()
def y_sample(self, observations, parameters,
distr='joint', lag=None, num_samples=None,
forward_message=None, backward_message=None,
latent_var=None, tqdm=None, **kwargs):
""" Sample new observations conditional on observations
Samples y_t* ~ y_t* | y_{<= t+lag}, theta
Args:
observations (ndarray): observations
parameters (LGSSMParameters): parameters
lag (int): what observations to condition on, None = all
            num_samples (int, optional): number of samples
forward_message (dict): forward message
backward_message (dict): backward message
latent_var (ndarray): latent vars
if provided, will sample from (y_t* | u_t, theta) instead
must match num_samples parameters
Returns:
sampled_observations : shape depends on num_samples parameters
last dimension is num_samples
"""
raise NotImplementedError()
def simulate_distr(self, T, parameters, init_message=None, tqdm=None):
raise NotImplementedError()
def simulate(self, T, parameters, init_message=None, num_samples=None, tqdm=None):
raise NotImplementedError()
## PF Functions
def pf_loglikelihood_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel='prior', forward_message=None,
**kwargs):
""" Particle Filter Marginal Log-Likelihood Estimate
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
            weights (ndarray): weights (to correct stochastic approx)
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(u_t | u_{t-1})
"optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
forward_message (dict): prior for buffered subsequence
**kwargs - additional keyword args for individual filters
Return:
            loglikelihood (double): marginal log likelihood estimate
"""
raise NotImplementedError()
def pf_predictive_loglikelihood_estimate(self, observations, parameters,
num_steps_ahead=1,
subsequence_start=0, subsequence_end=None, weights=None,
pf="filter", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Filter Predictive Log-Likelihoood Estimate
Returns predictive log-likleihood for k = [0,1, ...,num_steps_ahead]
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
num_steps_ahead (int): number of steps
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
N (int): number of particles used by particle filter
kernel (string): kernel to use
forward_message (dict): prior for buffered subsequence
**kwargs - additional keyword args for individual filters
Return:
predictive_loglikelihood (num_steps_ahead + 1 ndarray)
"""
raise NotImplementedError()
def pf_gradient_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Smoother Gradient Estimate
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
            weights (ndarray): weights (to correct stochastic approx)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(u_t | u_{t-1})
"optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
forward_message (dict): prior for buffered subsequence
**kwargs - additional keyword args for individual filters
Return:
grad (dict): grad of variables in parameters
"""
raise NotImplementedError()
def pf_latent_var_distr(self, observations, parameters, lag=None,
subsequence_start=0, subsequence_end=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Sample latent vars distribution conditional on observations
Returns distribution for (u_t | y_{<= t+lag}, theta)
Estimated using particle filter/smoother
Args:
observations (ndarray): observations
parameters (LGSSMParameters): parameters
lag (int): what observations to condition on, None = all
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(u_t | u_{t-1})
"optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
forward_message (dict): prior for buffered subsequence
**kwargs - additional keyword args for individual filters
Returns:
Depends on latent var type, Gaussian -> mean, cov; Discrete -> prob
"""
raise NotImplementedError()
def pf_y_distr(self, observations, parameters,
distr='marginal', lag=None,
subsequence_start=0, subsequence_end=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Sample observation distribution conditional on observations
        Returns distribution for (y_t* | y_{<= t+lag}, theta)
Estimated using particle filter/smoother
Args:
observations (ndarray): observations
parameters (LGSSMParameters): parameters
lag (int): what observations to condition on, None = all
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(u_t | u_{t-1})
"optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
forward_message (dict): prior for buffered subsequence
**kwargs - additional keyword args for individual filters
Returns:
            Depends on observation type, Gaussian -> mean, cov; Discrete -> prob
"""
raise NotImplementedError()
# Helper Function for Sampling Subsequences
def random_subsequence_and_weights(S, T, partition_style=None):
""" Get Subsequence + Weights
Args:
S (int): length of subsequence
T (int): length of full sequence
partition_style (string): what type of partition
'strict' - strict partition, with weights
'uniform' - uniformly, with weights
'naive' - uniformly, with incorrect weights (not recommended)
Returns:
subsequence_start (int): start of subsequence (inclusive)
subsequence_end (int): end of subsequence (exclusive)
weights (ndarray): weights for [start,end)
"""
if partition_style is None:
partition_style = 'uniform'
if partition_style == 'strict':
if T % S != 0:
raise ValueError("S {0} does not evenly divide T {1}".format(S, T)
)
subsequence_start = np.random.choice(np.arange(0, T//S)) * S
subsequence_end = subsequence_start + S
weights = np.ones(S, dtype=float)*T/S
elif partition_style == 'uniform':
subsequence_start = np.random.randint(0, T-S+1)
subsequence_end = subsequence_start + S
t = np.arange(subsequence_start, subsequence_end)
if subsequence_end <= 2*S:
num_sequences = np.min(np.array([
t+1, np.ones_like(t)*min(S, T-S+1)
]), axis=0)
elif subsequence_start >= T-2*S-1:
num_sequences = np.min(np.array([
T-t, np.ones_like(t)*min(S, T-S+1)
]), axis=0)
else:
num_sequences = np.ones(S)*S
weights = np.ones(S, dtype=float)*(T-S+1)/num_sequences
elif partition_style == 'naive':
# Not recommended because the weights are incorrect
subsequence_start = np.random.randint(0, T-S+1)
subsequence_end = subsequence_start + S
weights = np.ones(S, dtype=float)*T/S
else:
raise ValueError("Unrecognized partition_style = '{0}'".format(
partition_style))
return int(subsequence_start), int(subsequence_end), weights
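if __name__ == "__main__":
    # Minimal demo sketch (illustrative values only): draw a random length-4
    # subsequence of a length-20 sequence and inspect the bias-correction
    # weights under each partition style.
    np.random.seed(0)
    for style in ['strict', 'uniform', 'naive']:
        start, end, weights = random_subsequence_and_weights(
            S=4, T=20, partition_style=style)
        print(style, start, end, weights)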
| 82,434 | 39.789213 | 86 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/hmm_helper.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ..sgmcmc_sampler import SGMCMCHelper
from .._utils import random_categorical
class HMMHelper(SGMCMCHelper):
""" HMM Helper
forward_message (dict) with keys
prob_vector (ndarray) dimension num_states
log_constant (double) log scaling const
backward_message (dict) with keys
likelihood_vector (ndarray) dimension num_states
log_constant (double) log scaling const
"""
def __init__(self, forward_message=None, backward_message=None, **kwargs):
if forward_message is None:
forward_message = {
'prob_vector': np.ones(self.num_states) / \
self.num_states,
'log_constant': 0.0,
}
self.default_forward_message=forward_message
if backward_message is None:
backward_message = {
'likelihood_vector': np.ones(self.num_states)/self.num_states,
'log_constant': np.log(self.num_states),
}
self.default_backward_message=backward_message
return
def _forward_messages(self, observations, parameters, forward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of forward messages
# y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
forward_messages = [None]*(num_obs+1)
forward_messages[0] = forward_message
Pi = parameters.pi
prob_vector = forward_message['prob_vector']
log_constant = forward_message['log_constant']
pbar = range(num_obs)
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description("forward messages")
for t in pbar:
y_cur = observations[t]
weight_t = 1.0 if weights is None else weights[t]
P_t, log_t = self._likelihoods(y_cur, parameters=parameters)
prob_vector = np.dot(prob_vector, Pi)
prob_vector = prob_vector * P_t
log_constant += weight_t * (log_t + np.log(np.sum(prob_vector)))
prob_vector = prob_vector/np.sum(prob_vector)
if not only_return_last:
forward_messages[t+1] = {
'prob_vector': prob_vector,
'log_constant': log_constant,
}
if only_return_last:
last_message = {
'prob_vector': prob_vector,
'log_constant': log_constant,
}
return last_message
else:
return forward_messages
def _backward_messages(self, observations, parameters, backward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of backward messages
# y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
backward_messages = [None]*(num_obs+1)
backward_messages[-1] = backward_message
Pi = parameters.pi
prob_vector = backward_message['likelihood_vector']
log_constant = backward_message['log_constant']
y_cur = None
pbar = reversed(range(num_obs))
if tqdm is not None:
pbar = tqdm(pbar, total=num_obs, leave=False)
pbar.set_description("backward messages")
for t in pbar:
y_cur = observations[t]
weight_t = 1.0 if weights is None else weights[t]
P_t, log_t = self._likelihoods(y_cur=y_cur,
parameters=parameters)
prob_vector = P_t * prob_vector
prob_vector = np.dot(Pi, prob_vector)
log_constant += weight_t * (log_t + np.log(np.sum(prob_vector)))
prob_vector = prob_vector/np.sum(prob_vector)
if not only_return_last:
backward_messages[t] = {
'likelihood_vector': prob_vector,
'log_constant': log_constant,
}
if only_return_last:
last_message = {
'likelihood_vector': prob_vector,
'log_constant': log_constant,
}
return last_message
else:
return backward_messages
def marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, weights=None,
**kwargs):
# Run forward pass + combine with backward pass
# y is num_obs x p x m matrix
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# forward_pass is Pr(z_{T-1} | y_{<=T-1})
forward_pass = self._forward_message(
observations=observations,
parameters=parameters,
forward_message=forward_message,
weights=weights,
**kwargs)
likelihood = np.dot(
forward_pass['prob_vector'],
backward_message['likelihood_vector'],
)
weight_t = 1.0 if weights is None else weights[-1]
loglikelihood = forward_pass['log_constant'] + \
weight_t * (np.log(likelihood) + backward_message['log_constant'])
return loglikelihood
def predictive_loglikelihood(self, observations, parameters, lag=1,
forward_message=None, backward_message=None, tqdm=None, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# Calculate Filtered
if lag == 0:
forward_messages = self.forward_pass(observations,
parameters, forward_message, tqdm=tqdm, **kwargs)
else:
forward_messages = self.forward_pass(observations[0:-lag],
parameters, forward_message, tqdm=tqdm, **kwargs)
loglike = 0.0
Pi = parameters.pi
pbar = range(lag, np.shape(observations)[0])
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description('predictive_loglikelihood')
for t in pbar:
# Calculate Pr(z_t | y_{<=t-lag}, theta)
prob_vector = forward_messages[t-lag]['prob_vector']
for l in range(lag):
prob_vector = np.dot(prob_vector, Pi)
P_t, log_constant = self._likelihoods(observations[t], parameters)
likelihood = np.dot(prob_vector, P_t)
loglike += np.log(likelihood) + log_constant
return loglike
def complete_data_loglikelihood(self, observations, latent_vars, parameters, forward_message=None, weights=None, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
log_constant = 0.0
Pi = parameters.pi
z_prev = forward_message.get('z_prev')
for t, (y_t, z_t) in enumerate(zip(observations, latent_vars)):
weight_t = 1.0 if weights is None else weights[t]
# Pr(Z_t | Z_t-1)
if (z_prev is not None):
log_c = np.log(Pi[z_prev, z_t])
log_constant += log_c * weight_t
# Pr(Y_t | Z_t)
log_c = self._emission_loglikelihood(y_t, z_t, parameters)
log_constant += log_c * weight_t
z_prev = z_t
return log_constant
def latent_var_distr(self, observations, parameters,
distr='marginal', lag=None,
forward_message=None, backward_message=None,
tqdm=None):
if distr != 'marginal':
raise NotImplementedError()
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
L = np.shape(observations)[0]
Pi = parameters.pi
z_prob = np.zeros((L, parameters.num_states), dtype=float)
# Forward Pass
forward_messages = self.forward_pass(
observations=observations,
parameters=parameters,
forward_message=forward_message,
tqdm=tqdm
)
pbar = range(L)
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description('calc latent var distr')
if lag is None:
# Smoothing
backward_messages = self.backward_pass(
observations=observations,
parameters=parameters,
backward_message=backward_message,
tqdm=tqdm
)
for t in pbar:
log_prob_t = (np.log(forward_messages[t]['prob_vector']) + \
np.log(backward_messages[t]['likelihood_vector']))
log_prob_t -= np.max(log_prob_t)
z_prob[t] = np.exp(log_prob_t)/np.sum(np.exp(log_prob_t))
return z_prob
elif lag <= 0:
# Prediction/Filtering
for t in pbar:
if t+lag >= 0:
prob_vector = forward_messages[t+lag]['prob_vector']
else:
prob_vector = forward_message['prob_vector']
# Forward Simulate
for _ in range(-lag):
prob_vector = np.dot(prob_vector, Pi)
log_prob_t = np.log(prob_vector)
log_prob_t -= np.max(log_prob_t)
z_prob[t] = np.exp(log_prob_t)/np.sum(np.exp(log_prob_t))
return z_prob
else:
# Fixed-lag Smoothing
for t in pbar:
# Backward Messages
                back_obs = observations[t+1:min(t+lag+1, L)]
fixed_lag_message = self.backward_message(
observations=back_obs,
parameters=parameters,
backward_message=backward_message,
)
# Output
                log_prob_t = (np.log(forward_messages[t]['prob_vector']) + \
                        np.log(fixed_lag_message['likelihood_vector']))
log_prob_t -= np.max(log_prob_t)
z_prob[t] = np.exp(log_prob_t)/np.sum(np.exp(log_prob_t))
return z_prob
def latent_var_sample(self, observations, parameters,
forward_message=None, backward_message=None,
distr='joint', lag=None, num_samples=None,
tqdm=None, include_init=False, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
if distr == 'joint' and lag is not None:
raise ValueError("Must set distr to 'marginal' for lag != None")
Pi = parameters.pi
L = observations.shape[0]
if num_samples is not None:
z = np.zeros((L, num_samples), dtype=int)
else:
z = np.zeros((L), dtype=int)
if distr == 'joint':
# Backward Pass
backward_messages = self.backward_pass(
observations=observations,
parameters=parameters,
backward_message=backward_message,
tqdm=tqdm
)
# Forward Sampler
pbar = enumerate(backward_messages)
if tqdm is not None:
pbar = tqdm(pbar, total=len(backward_messages), leave=False)
pbar.set_description("forward smoothed sampling z")
for t, backward_t in pbar:
y_cur = observations[t]
if t == 0:
post_t = np.dot(forward_message['prob_vector'], Pi)
if num_samples is not None:
post_t = np.outer(np.ones(num_samples), post_t)
else:
post_t = Pi[z[t-1]]
P_t, _ = self._likelihoods(
y_cur=y_cur,
parameters=parameters,
)
post_t = post_t * (P_t * backward_t['likelihood_vector'])
if num_samples is not None:
post_t = post_t/np.sum(post_t, axis=-1)[:,np.newaxis]
z[t] = np.array([random_categorical(post_t_s)
for post_t_s in post_t], dtype=int)
else:
post_t = post_t/np.sum(post_t)
z[t] = random_categorical(post_t)
return z
elif distr == 'marginal':
# Calculate Distribution
z_prob = self.latent_var_distr(observations, parameters,
lag=lag, forward_message=forward_message,
backward_message=backward_message, tqdm=tqdm,
)
            # Sample from Distribution (z was already allocated above)
pbar = reversed(range(L))
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description("sampling z")
for t in pbar:
z[t] = random_categorical(z_prob[t], size=num_samples)
return z
else:
raise ValueError("Unrecognized `distr'; {0}".format(distr))
return
def calc_gibbs_sufficient_statistic(self, observations, latent_vars,
**kwargs):
""" Gibbs Sample Sufficient Statistics
Args:
observations (ndarray): num_obs observations
latent_vars (ndarray): latent vars
Returns:
sufficient_stat (dict)
"""
raise NotImplementedError()
def _likelihoods(self, y_cur, parameters):
logP_t = self._emission_loglikelihoods(
y_cur=y_cur, parameters=parameters,
)
log_constant = np.max(logP_t)
logP_t = logP_t - log_constant
P_t = np.exp(logP_t)
return P_t, log_constant
def _emission_loglikelihoods(self, y_cur, parameters):
# Return loglikelihoods of observation y_cur for each z
# Override for compute/memory efficiency
return np.array([self._emission_loglikelihood(y_cur, kk, parameters)
for kk in range(parameters.num_states)])
def _emission_loglikelihood(self, y_cur, z_cur, parameters):
# Return loglikelihoods of observation y_cur for z_cur
raise NotImplementedError()
def gradient_marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None,
tqdm=None):
raise NotImplementedError()
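# Hypothetical minimal subclass sketch (illustration only; the concrete
# helpers in this package, e.g. ARPHMMHelper, implement these hooks for
# real emission models):
#
#   class UnitGaussianHMMHelper(HMMHelper):
#       def __init__(self, num_states, mus, **kwargs):
#           self.num_states = num_states
#           self.mus = mus  # (num_states, m) array of emission means
#           super().__init__(**kwargs)
#       def _emission_loglikelihood(self, y_cur, z_cur, parameters):
#           delta = y_cur - self.mus[z_cur]
#           return -0.5*np.sum(delta**2) - 0.5*delta.size*np.log(2*np.pi)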
from ..sgmcmc_sampler import SGMCMCSampler
from ..variables.probweight import (
BernoulliParamHelper,
TransitionMatrixParamHelper,
)
class CIRSamplerMixin(SGMCMCSampler):
# Adjust Sample SGLD and SGRLD to allow for Baker sampling
def _get_probweight_param_names(self):
names = []
for param_helper in self.parameters._param_helper_list:
if isinstance(param_helper, TransitionMatrixParamHelper):
names.append(param_helper._logit_name)
names.append(param_helper._expanded_name)
return names
def _sample_cir(self, var, a, epsilon):
""" Sample from Cox-Ingersoll-Ross Process
theta_new = 0.5*(1-exp(-epsilon)) * W
        W \sim NoncentralChi2(2a, 2*theta*exp(-epsilon)/(1-exp(-epsilon)))
Args:
var (string): variable name
a (float): Dirichlet sufficient statistic
epsilon (float): stepsize
"""
if var.startswith("logit_"):
is_logit = True
theta = getattr(self.parameters, "expanded_{0}".format(
var.replace("logit_", "", 1)))
elif var.startswith("expanded_"):
is_logit = False
theta = getattr(self.parameters, var)
else:
raise ValueError("var must be logit_ or expanded_")
if np.any(a < 0.001):
raise RuntimeError("Why is a < 0.001")
W = np.random.noncentral_chisquare(
df = 2*a,
nonc = 2*theta*np.exp(-epsilon)/(1-np.exp(-epsilon)),
)
theta_new = 0.5*(1-np.exp(-epsilon))*W + 1e-99
if is_logit:
logit_new = np.log(np.abs(theta_new) + 1e-99)
logit_new -= np.outer(
np.mean(logit_new, axis=1),
np.ones(logit_new.shape[1])
)
return logit_new
else:
return theta_new
def noisy_gradient(self, preconditioner=None, is_scaled=True,
use_scir=False, **kwargs):
""" Noisy Gradient Estimate
noisy_gradient = -grad tilde{U}(theta)
= grad marginal loglike + grad logprior
Monte Carlo Estimate of gradient (using buffering)
Args:
preconditioner (object): preconditioner for gradients
is_scaled (boolean): scale gradient by 1/T
use_scir (bool): whether to use Cox-Ingersoll-Ross sampling for
probability simplex variables
**kwargs: arguments for `self._noisy_grad_loglikelihood()`
For example: minibatch_size, buffer_length, use_analytic
Returns:
noisy_gradient (dict): dict of gradient vectors
"""
noisy_grad_loglike = \
self._noisy_grad_loglikelihood(use_scir=use_scir, **kwargs)
noisy_grad_prior = self.prior.grad_logprior(
parameters=kwargs.get('parameters',self.parameters),
use_scir=use_scir,
**kwargs
)
noisy_gradient = {var: noisy_grad_prior[var] + noisy_grad_loglike[var]
for var in noisy_grad_prior}
if preconditioner is None:
if is_scaled:
for var in noisy_gradient:
noisy_gradient[var] /= self._get_T(**kwargs)
else:
scale = 1.0/self._get_T(**kwargs) if is_scaled else 1.0
noisy_gradient = preconditioner.precondition(noisy_gradient,
parameters=kwargs.get('parameters',self.parameters),
scale=scale,
use_scir=use_scir,
)
return noisy_gradient
def sample_sgld(self, epsilon, use_scir=True, **kwargs):
""" One Step of Stochastic Gradient Langevin Dynamics
Args:
epsilon (double): step size
use_scir (bool): whether to use Cox-Ingersoll-Ross sampling for
probability simplex variables
**kwargs (kwargs): to pass to self.noisy_gradient
Returns:
parameters (Parameters): sampled parameters after one step
"""
# Use default sample_sgld if use_scir is False
if not use_scir:
return super().sample_sgld(epsilon, **kwargs)
# Use Stochastic Cox-Ingersoll-Ross Algorithm of Baker et al. 2018
if kwargs.get("is_scaled", True):
scale = 1.0 / self._get_T(**kwargs)
else:
scale = 1.0
if "preconditioner" in kwargs:
raise ValueError("Use SGRLD instead")
delta = self.noisy_gradient(use_scir=use_scir, **kwargs)
white_noise = self._get_sgmcmc_noise(**kwargs)
probweight_param_names = self._get_probweight_param_names()
for var in self.parameters.var_dict:
if var in probweight_param_names:
self.parameters.var_dict[var] = self._sample_cir(
var=var, a=delta[var]/scale, epsilon=epsilon)
else:
self.parameters.var_dict[var] += \
epsilon * delta[var] + np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
def sample_sgrld(self, epsilon, preconditioner, use_scir=True, **kwargs):
""" One Step of Stochastic Gradient Riemannian Langevin Dynamics
theta += epsilon * (D(theta) * grad_logjoint + correction_term) + \
N(0, 2 epsilon D(theta))
Args:
epsilon (double): step size
preconditioner (object): preconditioner
use_scir (bool): whether to use Cox-Ingersoll-Ross sampling for
probability simplex variables
Returns:
parameters (Parameters): sampled parameters after one step
"""
# Use default sample_sgrld if use_scir is False
if not use_scir:
return super().sample_sgrld(epsilon, preconditioner, **kwargs)
# Use Stochastic Cox-Ingersoll-Ross Algorithm of Baker et al. 2018
if kwargs.get("is_scaled", True):
scale = 1.0 / self._get_T(**kwargs)
else:
scale = 1.0
delta = self.noisy_gradient(preconditioner=preconditioner,
use_scir=use_scir, **kwargs)
white_noise = self._get_sgmcmc_noise(
preconditioner=preconditioner, **kwargs)
correction = preconditioner.correction_term(
self.parameters, scale=scale)
probweight_param_names = self._get_probweight_param_names()
for var in self.parameters.var_dict:
if var in probweight_param_names:
self.parameters.var_dict[var] = self._sample_cir(
var=var, a=delta[var]/scale, epsilon=epsilon)
else:
self.parameters.var_dict[var] += \
epsilon * (delta[var] + correction[var]) + \
np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
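if __name__ == "__main__":
    # Standalone sketch of the CIR transition used in _sample_cir above
    # (made-up values for a, theta, epsilon; no model classes involved):
    np.random.seed(0)
    a = np.array([5.0, 3.0, 2.0])       # Dirichlet sufficient statistics
    theta = np.array([0.5, 0.3, 0.2])   # current point on the simplex
    epsilon = 0.1                       # stepsize
    W = np.random.noncentral_chisquare(
        df=2*a, nonc=2*theta*np.exp(-epsilon)/(1-np.exp(-epsilon)))
    theta_new = 0.5*(1-np.exp(-epsilon))*W
    print(theta_new / np.sum(theta_new))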
| 22,130 | 37.894552 | 127 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/__init__.py | 0 | 0 | 0 | py | |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/arphmm/sampler.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCSampler, SeqSGMCMCSampler
from .parameters import ARPHMMParameters, ARPHMMPrior, ARPHMMPreconditioner
from .helper import ARPHMMHelper
class ARPHMMSampler(SGMCMCSampler):
# Note d = m*p
def __init__(self, num_states, m, p=None, d=None,
observations=None, prior=None,
parameters=None, forward_message=None,
name="ARPHMMHelper",
**kwargs):
self.options = kwargs
self.num_states = num_states
self.m = m
if p is None:
if d is None:
raise ValueError("Need to specify p or d dimension parameter")
else:
self.p = d//m
else:
self.p = p
self.name = name
self.setup(
observations=observations,
prior=prior,
parameters=parameters,
forward_message=forward_message,
)
return
@property
def d(self):
return self.p*self.m
def setup(self, observations, prior, parameters=None,
forward_message=None):
""" Initialize the sampler
Args:
observations (ndarray): T by p+1 by m ndarray of time series values
            prior (ARPHMMPrior): prior
forward_message (ndarray): prior probability for latent state
parameters (ARPHMMParameters): initial parameters
(optional, will sample from prior by default)
"""
self.observations = observations
if prior is None:
prior = ARPHMMPrior.generate_default_prior(
num_states=self.num_states, m=self.m, d=self.d,
)
self.prior = prior
if parameters is None:
self.parameters = self.prior.sample_prior()
else:
if not isinstance(parameters, ARPHMMParameters):
raise ValueError("parameters is not a ARPHMMParameter")
self.parameters = parameters
if forward_message is None:
forward_message = {
'prob_vector': np.ones(self.num_states) / \
self.num_states,
'log_constant': 0.0,
}
self.forward_message = forward_message
self.backward_message = {
'likelihood_vector': np.ones(self.num_states)/self.num_states,
'log_constant': np.log(self.num_states),
}
self.message_helper=ARPHMMHelper(
num_states=self.num_states,
m=self.m,
d=self.d,
forward_message=forward_message,
backward_message=self.backward_message,
)
return
def _check_observation_shape(self, observations):
if observations is None:
return
# Check Shape
if np.shape(observations)[1] != self.p+1:
raise ValueError("observations second dimension does not match p+1, did you call stack_y?")
if np.shape(observations)[2] != self.m:
raise ValueError("observations third dimension does not match m, did you call stack_y?")
def _get_preconditioner(self, preconditioner=None):
if preconditioner is None:
preconditioner = ARPHMMPreconditioner()
return preconditioner
def init_parameters_from_z(self, z, observations=None):
""" Get initial parameters for the sampler
Args:
z (ndarray): latent var assignment
Return:
init_parameters (ARPHMMParameters): init_parameters
"""
observations = self._get_observations(observations=observations)
T = self._get_T(observations=observations)
# Check z is appropriate size
        if np.shape(z)[0] != T:
            raise ValueError("z must be length T = {0}".format(T))
if not np.issubdtype(z.dtype, np.integer):
raise ValueError("z must be integers, not {0}".format(z.dtype))
if np.max(z) >= self.num_states or np.min(z) < 0:
raise ValueError("z must be in (0, \ldots, {0}-1)".format(
self.num_states))
# Perform on Gibb Step
init_parameters = self.message_helper.parameters_gibbs_sample(
observations=observations,
latent_vars=z,
forward_message=self.forward_message,
backward_message=self.backward_message,
prior=self.prior,
)
self.parameters = init_parameters
return init_parameters
def init_parameters_from_k_means(self, observations=None,
lags=[0], kmeans=None, **kwargs):
""" Get initial parameters for the sampler
Use KMeans on data (treating observations as independent)
Each point is concat(y[lag] for lag in lags)
Args:
observations (ndarray): observations
lags (list of indices): indices of lags to use for clustering
kmeans (sklearn model): e.g. sklearn.cluster.KMeans
**kwargs (dict): keyword args to pass to sklearn's kmean
"n_init" : int (default = 10)
"max_iter": int (default = 300)
"n_jobs" : int (default = 1)
See sklearn.cluster.KMeans for more
Returns:
init_parameters (ARPHMMParameters): init_parameters
"""
from sklearn.cluster import KMeans, MiniBatchKMeans
observations = self._get_observations(observations=observations)
T = self._get_T(observations=observations)
# Run KMeans
if kmeans is None:
if T <= 10**6:
kmeans = KMeans(n_clusters = self.num_states, **kwargs)
else:
kmeans = MiniBatchKMeans(n_clusters = self.num_states, **kwargs)
X = observations[:, 0, :].reshape((T, -1))
X_lagged = np.hstack([
X[max(lags)-lag:X.shape[0]-lag] for lag in lags
])
z = kmeans.fit_predict(X=X_lagged)
if z.size < T:
z = np.concatenate([np.zeros(T-z.size, dtype=int), z])
# Calculate Initial Param from KMeans init
init_parameters = self.init_parameters_from_z(z)
return init_parameters
def sample_z(self, parameters=None, observations=None, tqdm=None, **kwargs):
""" Sample Z """
if parameters is None:
parameters = self.parameters
if observations is None:
observations = self.observations
z = self.message_helper.latent_var_sample(
observations=observations,
parameters=parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
return z
def calc_z_prob(self, parameters=None, observations=None, tqdm=None,
**kwargs):
""" Calculate Posterior Marginal over Z """
if parameters is None:
parameters = self.parameters
if observations is None:
observations = self.observations
z_prob = self.message_helper.latent_var_distr(
observations=observations,
parameters=parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
return z_prob
def sample_gibbs(self, tqdm=None):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (ARPHMMParameters): sampled parameters after one step
"""
z = self.sample_z(tqdm=tqdm)
new_parameters = self.message_helper.parameters_gibbs_sample(
observations=self.observations,
latent_vars=z,
forward_message=self.forward_message,
backward_message=self.backward_message,
prior=self.prior,
)
self.parameters = new_parameters
return self.parameters
class SeqARPHMMSampler(SeqSGMCMCSampler, ARPHMMSampler):
pass
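# Hypothetical end-to-end sketch (shapes illustrative; `y_raw` is a plain
# (T+p, m) series and stack_y from .parameters builds the (T, p+1, m)
# lagged array this sampler expects):
#
#   from sgmcmc_ssm.models.arphmm.parameters import stack_y
#   sampler = ARPHMMSampler(num_states=2, m=1, p=2,
#                           observations=stack_y(y_raw, 2))
#   sampler.init_parameters_from_k_means()
#   for _ in range(100):
#       sampler.sample_gibbs()
#   z_hat = sampler.sample_z()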
| 8,279 | 34.084746 | 103 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/arphmm/helper.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ..hmm_helper import HMMHelper
from .parameters import stack_y
from ..._utils import random_categorical
class ARPHMMHelper(HMMHelper):
""" ARPHMM Helper
forward_message (dict) with keys
prob_vector (ndarray) dimension num_states
log_constant (double) log scaling const
backward_message (dict) with keys
likelihood_vector (ndarray) dimension num_states
log_constant (double) log scaling const
y_next (ndarray) y_{t+1}
"""
def __init__(self, num_states, m, d,
forward_message=None,
backward_message=None,
**kwargs):
self.num_states = num_states
self.m = m
self.d = d
super().__init__(
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
return
def y_sample(self, observations, parameters,
distr='joint', lag=None, num_samples=None,
forward_message=None, backward_message=None,
latent_vars=None, tqdm=None):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
if distr == 'joint' and lag is not None:
raise ValueError("Must set distr to 'marginal' for lag != None")
if (lag is None) or (lag > parameters.p):
# Handle Smoothing Case
if latent_vars is None:
latent_vars = self.latent_var_sample(
observations=observations,
parameters=parameters,
distr=distr,
lag=lag,
num_samples=num_samples,
forward_message=forward_message,
backward_message=backward_message,
latent_vars=latent_vars,
tqdm=tqdm,
)
D, R = parameters.D, parameters.R
L = latent_vars.shape[0]
if num_samples is None:
y = np.zeros((L, self.m))
z = latent_vars
for k in range(self.num_states):
num_k = np.sum(z==k)
if num_k == 0:
continue
                    mu_k = observations[z==k,1:].reshape(num_k, -1).dot(D[k].T)
                    y[z==k] = mu_k + np.random.multivariate_normal(
                            mean=np.zeros(self.m), cov=R[k], size=num_k)
return y
else:
y = np.zeros((L, num_samples, self.m))
pbar = range(num_samples)
if tqdm is not None:
pbar = tqdm(pbar)
                    pbar.set_description('sample y')
for s in pbar:
z = latent_vars[:,s]
for k in range(self.num_states):
num_k = np.sum(z==k)
if num_k == 0:
continue
                        mu_k = observations[z==k,1:].reshape(num_k, -1).dot(D[k].T)
y[z==k,s] = mu_k + np.random.multivariate_normal(
mean=np.zeros(self.m), cov=R[k], size=num_k)
y = np.swapaxes(y, 1, 2)
return y
else:
# Predictive Lag
raise NotImplementedError()
def simulate(self, T, parameters, init_message=None, num_samples=None,
include_init=True, tqdm=None):
if init_message is None:
init_message = self.default_forward_message
num_states, m, p = parameters.num_states, parameters.m, parameters.p
Pi, D, R = parameters.pi, parameters.D, parameters.R
# Outputs
if num_samples is not None:
latent_vars = np.zeros((T+1, num_samples), dtype=int)
obs_vars = np.zeros((T+p+1, num_samples, m), dtype=float)
y_prev = init_message.get('y_prev', np.zeros((p,m)))
y_prev = np.repeat(y_prev[np.newaxis,:,:], num_samples, 0)
obs_vars[:p] = y_prev.transpose(1,0,2)
else:
latent_vars = np.zeros((T+1), dtype=int)
obs_vars = np.zeros((T+p+1, m), dtype=float)
y_prev = init_message.get('y_prev', np.zeros((p,m)))
obs_vars[:p] = y_prev
latent_vars[0] = random_categorical(init_message['prob_vector'],
size=num_samples)
pbar = range(T+1)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("simulating data")
for t in pbar:
latent_prev = latent_vars[t]
if num_samples is None:
k = latent_prev
mu_k = y_prev.flatten().dot(D[k].T)
obs_vars[t+p] = \
mu_k + np.random.multivariate_normal(
mean=np.zeros(m),
cov=R[k],
)
                if t+1 < latent_vars.shape[0]:
latent_vars[t+1] = random_categorical(Pi[k])
y_prev = np.vstack([obs_vars[t], y_prev[:-1,:]])
else:
for k in range(num_states):
num_k = np.sum(latent_prev == k)
if num_k == 0:
continue
mu_k = y_prev[latent_prev == k].reshape(num_k, -1).dot(D[k].T)
obs_vars[t+p,latent_prev == k] = \
mu_k + np.random.multivariate_normal(
mean=np.zeros(m),
cov=R[k],
size=num_k
)
if t+1 < latent_vars.shape[0]:
latent_vars[t+1, latent_prev == k] = \
random_categorical(Pi[k], size=num_k)
y_prev = np.hstack([
obs_vars[t].reshape(num_samples,1,m),
y_prev[:,:-1,:]
])
if num_samples is not None:
obs_vars = np.array([stack_y(obs_vars[:,s,:],p)
for s in range(num_samples)])
obs_vars = np.transpose(obs_vars, (1,2,3,0))
else:
obs_vars = stack_y(obs_vars, p)
if include_init:
return dict(
observations=obs_vars,
latent_vars=latent_vars,
)
else:
return dict(
observations=obs_vars[1:],
latent_vars=latent_vars[1:],
)
def calc_gibbs_sufficient_statistic(self, observations, latent_vars,
**kwargs):
""" Gibbs Sample Sufficient Statistics
Args:
observations (ndarray): num_obs observations
latent_vars (ndarray): latent vars
Returns:
sufficient_stat (dict) containing:
alpha (ndarray) num_states by num_states, pairwise z counts
S_count (ndarray) num_states, z counts
S_prevprev (ndarray) num_states mp by mp, z counts
S_curprev (ndarray) num_states m by mp, y sum
S_curcur (ndarray) num_states m by m, yy.T sum
"""
z = latent_vars
# Sufficient Statistics for Pi
z_pair_count = np.zeros((self.num_states, self.num_states))
for t in range(1, np.size(z)):
z_pair_count[z[t-1], z[t]] += 1.0
# Sufficient Statistics for mu and R
S_count = np.zeros(self.num_states)
y_sum = np.zeros((self.num_states, self.m))
yy_prevprev = np.zeros((self.num_states, self.d, self.d))
yy_curprev = np.zeros((self.num_states, self.m, self.d))
yy_curcur = np.zeros((self.num_states, self.m, self.m))
for k in range(self.num_states):
S_count[k] = np.sum(z == k)
if S_count[k] == 0:
# No Sufficient Statistics for No Observations
continue
yk = observations[z==k, 0, :]
yprevk = np.reshape(observations[z==k, 1:, :], (yk.shape[0], -1))
# Sufficient Statistics for group k
y_sum[k] = np.sum(yk, axis=0)
yy_prevprev[k] = np.dot(yprevk.T, yprevk)
yy_curprev[k] = np.dot(yk.T, yprevk)
yy_curcur[k] = np.dot(yk.T, yk)
# Return sufficient Statistics
sufficient_stat = {}
sufficient_stat['pi'] = dict(alpha = z_pair_count)
sufficient_stat['D'] = dict(
S_prevprev = yy_prevprev,
S_curprev = yy_curprev,
)
sufficient_stat['R'] = dict(
S_count=S_count,
S_prevprev = yy_prevprev,
S_curprev = yy_curprev,
S_curcur=yy_curcur,
)
return sufficient_stat
def _emission_loglikelihoods(self, y_cur, parameters):
# y_cur should be p+1 by m,
loglikelihoods = np.zeros(self.num_states, dtype=float)
y_prev = y_cur[1:].flatten()
for k, (D_k, LRinv_k) in enumerate(
zip(parameters.D, parameters.LRinv)):
delta = y_cur[0] - np.dot(D_k, y_prev)
LRinvTdelta = np.dot(delta, LRinv_k)
loglikelihoods[k] = \
-0.5 * np.dot(LRinvTdelta, LRinvTdelta) + \
-0.5 * self.m * np.log(2*np.pi) + \
np.sum(np.log(np.abs(np.diag(LRinv_k))))
return loglikelihoods
def _emission_loglikelihood(self, y_cur, z_cur, parameters):
# y_cur should be p+1 by m,
loglikelihoods = np.zeros(self.num_states, dtype=float)
y_prev = y_cur[1:].flatten()
D_k, LRinv_k = parameters.D[z_cur], parameters.LRinv[z_cur]
delta = y_cur[0] - np.dot(D_k, y_prev)
LRinvTdelta = np.dot(delta, LRinv_k)
loglikelihood = \
-0.5 * np.dot(LRinvTdelta, LRinvTdelta) + \
-0.5 * self.m * np.log(2*np.pi) + \
np.sum(np.log(np.abs(np.diag(LRinv_k))))
return loglikelihood
def gradient_marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None):
# Forward Pass
forward_messages = self.forward_pass(observations, parameters,
forward_message, include_init_message=True)
# Backward Pass
backward_messages = self.backward_pass(observations, parameters,
backward_message, include_init_message=True)
# Gradients
grad = {var: np.zeros_like(value)
for var, value in parameters.as_dict().items()}
Pi, expanded_pi = parameters.pi, parameters.expanded_pi
D = parameters.D
LRinv, Rinv, R = parameters.LRinv, parameters.Rinv, parameters.R
pbar = enumerate(zip(forward_messages[:-1], backward_messages[1:]))
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("gradient loglike")
for t, (forward_t, backward_t) in pbar:
# r_t is Pr(z_{t-1} | y_{< t})
# s_t is Pr(z_t | y_{< t})
# q_t is Pr(y_{> t} | z_t)
r_t = forward_t['prob_vector']
s_t = np.dot(r_t, Pi)
q_t = backward_t['likelihood_vector']
weight_t = 1.0 if weights is None else weights[t]
# Calculate P_t = Pr(y_t | z_t)
y_cur = observations[t]
P_t, _ = self._likelihoods(
y_cur=y_cur,
parameters=parameters
)
# Marginal + Pairwise Marginal
joint_post = np.diag(r_t).dot(Pi).dot(np.diag(P_t*q_t))
joint_post = joint_post/np.sum(joint_post)
marg_post = np.sum(joint_post, axis=0)
# Grad for pi
if parameters.pi_type == "logit":
# Gradient of logit_pi
grad['logit_pi'] += weight_t * (joint_post - \
np.diag(np.sum(joint_post, axis=1)).dot(Pi))
elif parameters.pi_type == "expanded":
grad['expanded_pi'] += weight_t * np.array([
(expanded_pi[k]**-1)*(
joint_post[k] - np.sum(joint_post[k])*Pi[k])
for k in range(self.num_states)
])
else:
                raise RuntimeError(
                    "Unrecognized pi_type: {0}".format(parameters.pi_type))
# grad for mu and LRinv
y_prev = y_cur[1:].flatten()
for k, D_k, LRinv_k, Rinv_k, R_k in zip(
range(self.num_states), D, LRinv, Rinv, R):
diff_k = y_cur[0] - np.dot(D_k, y_prev)
grad['D'][k] += weight_t * (
np.outer(Rinv_k.dot(diff_k), y_prev) * marg_post[k])
grad_LRinv_k = weight_t * (
(R_k - np.outer(diff_k, diff_k)).dot(LRinv_k)
) * marg_post[k]
grad['LRinv_vec'][k] += grad_LRinv_k[np.tril_indices(self.m)]
return grad
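

# Illustrative sketch (added; not part of the original module): computes the
# AR(p) emission sufficient statistics returned by
# `calc_gibbs_sufficient_statistic` by hand for a single state, to make the
# S_prevprev / S_curprev / S_curcur definitions concrete. All names are local
# to this example.
def _example_arp_sufficient_statistics():
    rng = np.random.RandomState(0)
    T, p, m = 10, 2, 3
    obs = rng.randn(T, p + 1, m)            # stacked observations (see stack_y)
    y_cur = obs[:, 0, :]                    # current values y_t
    y_prev = obs[:, 1:, :].reshape(T, -1)   # flattened lags, shape (T, m*p)
    S_prevprev = y_prev.T.dot(y_prev)       # (mp, mp)
    S_curprev = y_cur.T.dot(y_prev)         # (m, mp)
    S_curcur = y_cur.T.dot(y_cur)           # (m, m)
    return S_prevprev, S_curprev, S_curcur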
| 13,187 | 38.8429 | 82 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/arphmm/parameters.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...base_parameters import (
BaseParameters, BasePrior, BasePreconditioner,
)
from ...variables import (
TransitionMatrixParamHelper, TransitionMatrixPriorHelper,
TransitionMatrixPrecondHelper,
RectMatricesParamHelper, RectMatricesPriorHelper,
RectMatricesPrecondHelper,
CovariancesParamHelper, CovariancesPriorHelper,
CovariancesPrecondHelper,
)
from ..._utils import random_categorical
class ARPHMMParameters(BaseParameters):
""" AR(p) HMM Parameters """
_param_helper_list = [
TransitionMatrixParamHelper(name='pi', dim_names=['num_states', 'pi_type']),
RectMatricesParamHelper(name='D', dim_names=['m', 'd', 'num_states']),
CovariancesParamHelper(name='R', dim_names=['m', 'num_states']),
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "ARPHMMParameters:"
my_str += "\npi:\n" + str(self.pi)
my_str += "\npi_type: `" + str(self.pi_type) + "`"
my_str += "\nD:\n" + str(self.D)
my_str += "\nR:\n" + str(self.R)
return my_str
@property
def p(self):
return (self.d//self.m)
def project_parameters(self, **kwargs):
if 'D' not in kwargs:
kwargs['D'] = dict(thresh = True)
return super().project_parameters(**kwargs)
class ARPHMMPrior(BasePrior):
""" AR(p) HMM Prior
See individual Prior Mixins for details
"""
_Parameters = ARPHMMParameters
_prior_helper_list = [
CovariancesPriorHelper(name='R', dim_names=['m', 'num_states'],
matrix_name='D'),
TransitionMatrixPriorHelper(name='pi', dim_names=['num_states']),
RectMatricesPriorHelper(name='D',
dim_names=['m', 'd', 'num_states'],
var_row_name='R'),
]
class ARPHMMPreconditioner(BasePreconditioner):
""" AR(p) HMM Preconditioner
See individual Precondition Mixins for details
"""
_precond_helper_list = [
TransitionMatrixPrecondHelper(name='pi', dim_names=['num_states']),
RectMatricesPrecondHelper(name='D',
dim_names=['m', 'd', 'num_states'],
var_row_name='R'),
CovariancesPrecondHelper(name='R', dim_names=['m', 'num_states']),
]
def generate_arphmm_data(T, parameters, initial_message = None,
tqdm=None):
""" Helper function for generating ARPHMM time series
Args:
T (int): length of series
parameters (ARPHMMParameters): parameters
        initial_message (dict): prior message for the initial latent state
            and lag history y_prev
Returns:
data (dict): dictionary containing:
observations (ndarray): T by p+1 by m
latent_vars (ndarray): T takes values in {1,...,num_states}
parameters (ARPHMMParameters)
init_message (ndarray)
"""
num_states, m, mp = np.shape(parameters.D)
p = mp//m
D = parameters.D
R = parameters.R
Pi = parameters.pi
if initial_message is None:
initial_message = {
'prob_vector': np.ones(num_states)/num_states,
'log_constant': 0.0,
'y_prev': np.zeros((p,m))
}
latent_vars = np.zeros((T+p), dtype=int)
obs_vars = np.zeros((T+p, m))
latent_prev = random_categorical(initial_message['prob_vector'])
y_prev = initial_message.get('y_prev')
    # run T+p steps so the lag buffer is filled and obs_vars / latent_vars
    # are populated to their full length T+p before stacking below
    pbar = range(T+p)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("generating data")
for t in pbar:
latent_vars[t] = random_categorical(Pi[latent_prev])
D_k = D[latent_vars[t]]
R_k = R[latent_vars[t]]
obs_vars[t] = np.random.multivariate_normal(
mean=np.dot(D_k, y_prev.flatten()),
cov = R_k,
)
latent_prev = latent_vars[t]
y_prev = np.vstack([obs_vars[t], y_prev[:-1,:]])
observations = stack_y(obs_vars, p)
latent_vars = latent_vars[p:]
data = dict(
observations=observations,
latent_vars=latent_vars,
parameters=parameters,
initial_message=initial_message,
)
return data
def stack_y(y, p):
""" Stack y
Args:
        y (ndarray): T+p by m matrix
        p (int): number of lags (the AR order)
Returns:
y_stacked (ndarray): T by p+1 by m matrix of the form
y_stacked[0] = [y[p], y[p-1], ..., y[0]]
y_stacked[1] = [y[p+1], y[p], ..., y[1]]
...
y_stacked[t] = [y[p+t], y[p+t-1], ..., y[t]]
...
y_stacked[T] = [y[p+T], y[p+T-1], ..., y[T]]
"""
if np.ndim(y) == 1:
y = np.array([y]).T
T, m = np.shape(y)
y_lags = [np.pad(y, ((0, lag), (0,0)), mode='constant')[lag:, :]
for lag in reversed(range(p+1))]
y_stacked = np.swapaxes(np.dstack(y_lags), 1, 2)[:T-p]
return y_stacked
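

# Illustrative sketch (added; not part of the original module): stack_y on a
# tiny scalar series, showing that row t is [y[t+p], y[t+p-1], ..., y[t]].
def _example_stack_y():
    y = np.arange(5.0)               # y = [0, 1, 2, 3, 4]
    y_stacked = stack_y(y, p=2)      # shape (3, 3, 1)
    # y_stacked[0, :, 0] == [2., 1., 0.]
    # y_stacked[1, :, 0] == [3., 2., 1.]
    # y_stacked[2, :, 0] == [4., 3., 2.]
    return y_stacked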
| 5,128 | 32.522876 | 88 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/arphmm/__init__.py | from .parameters import (
ARPHMMParameters,
ARPHMMPrior,
ARPHMMPreconditioner,
generate_arphmm_data,
stack_y,
)
from .helper import ARPHMMHelper
from .sampler import ARPHMMSampler, SeqARPHMMSampler
| 222 | 21.3 | 52 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/gauss_hmm/sampler.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCSampler, SeqSGMCMCSampler
from .parameters import GaussHMMParameters, GaussHMMPrior, GaussHMMPreconditioner
from .helper import GaussHMMHelper
class GaussHMMSampler(SGMCMCSampler):
def __init__(self, num_states, m, observations=None, prior=None,
parameters=None, forward_message=None, name="GaussHMMHelper",
**kwargs):
self.options = kwargs
self.num_states = num_states
self.m = m
self.name = name
self.setup(
observations=observations,
prior=prior,
parameters=parameters,
forward_message=forward_message,
)
return
def setup(self, observations, prior, parameters=None,
forward_message=None):
""" Initialize the sampler
Args:
observations (ndarray): T by m ndarray of time series values
            prior (GaussHMMPrior): prior
            forward_message (dict): prior probability for latent state
            parameters (GaussHMMParameters): initial parameters
                (optional, will sample from the prior by default)
"""
self.observations = observations
if prior is None:
prior = GaussHMMPrior.generate_default_prior(
num_states=self.num_states, m=self.m,
)
self.prior = prior
if parameters is None:
self.parameters = self.prior.sample_prior()
else:
if not isinstance(parameters, GaussHMMParameters):
raise ValueError("parameters is not a GaussHMMParameter")
self.parameters = parameters
if forward_message is None:
forward_message = {
'prob_vector': np.ones(self.num_states) / \
self.num_states,
'log_constant': 0.0,
}
self.forward_message = forward_message
self.backward_message = {
'likelihood_vector': np.ones(self.num_states)/self.num_states,
'log_constant': np.log(self.num_states),
}
self.message_helper=GaussHMMHelper(
num_states=self.num_states,
m=self.m,
forward_message=forward_message,
backward_message=self.backward_message,
)
return
def _check_observation_shape(self, observations):
if observations is None:
return
if np.shape(observations)[1] != self.m:
raise ValueError("observations last dimension does not match m")
return
def _get_preconditioner(self, preconditioner=None):
if preconditioner is None:
preconditioner = GaussHMMPreconditioner()
return preconditioner
def init_parameters_from_z(self, z, observations=None):
""" Get initial parameters for the sampler
Args:
z (ndarray): latent var assignment
Return:
            init_parameters (GaussHMMParameters): init_parameters
"""
observations = self._get_observations(observations)
# Check z is appropriate size
if np.shape(z)[0] != observations.shape[0]:
raise ValueError("z must be length of observations = {0}".format(observations.shape[0]))
if not np.issubdtype(z.dtype, np.integer):
raise ValueError("z must be integers, not {0}".format(z.dtype))
if np.max(z) >= self.num_states or np.min(z) < 0:
raise ValueError("z must be in (0, \ldots, {0}-1)".format(
self.num_states))
        # Perform one Gibbs step
init_parameters = self.message_helper.parameters_gibbs_sample(
observations=observations,
latent_vars=z,
prior=self.prior,
)
self.parameters = init_parameters
return init_parameters
def init_parameters_from_k_means(self, lags=[0], kmeans=None,
observations=None, **kwargs):
""" Get initial parameters for the sampler
Use KMeans on data (treating observations as independent)
Each point is concat(y[lag] for lag in lags)
Args:
lags (list of indices): indices of lags to use for clustering
kmeans (sklearn model): e.g. sklearn.cluster.KMeans
            **kwargs (dict): keyword args to pass to sklearn's KMeans
"n_init" : int (default = 10)
"max_iter": int (default = 300)
"n_jobs" : int (default = 1)
See sklearn.cluster.KMeans for more
Returns:
            init_parameters (GaussHMMParameters): init_parameters
"""
from sklearn.cluster import KMeans, MiniBatchKMeans
observations = self._get_observations(observations)
# Run KMeans
if kmeans is None:
if observations.shape[0] <= 10**6:
kmeans = KMeans(n_clusters = self.num_states, **kwargs)
else:
kmeans = MiniBatchKMeans(n_clusters = self.num_states, **kwargs)
X = observations.reshape((observations.shape[0], -1))
X_lagged = np.hstack([
X[max(lags)-lag:X.shape[0]-lag] for lag in lags
])
z = kmeans.fit_predict(X=X_lagged)
if z.size < observations.shape[0]:
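            # pad the unlagged prefix with state 0 so len(z) == len(observations)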
z = np.concatenate([np.zeros(observations.shape[0]-z.size,
dtype=int), z])
# Calculate Initial Param from KMeans init
init_parameters = self.init_parameters_from_z(z, observations)
return init_parameters
def sample_z(self, parameters=None, observations=None, tqdm=None, **kwargs):
""" Sample Z """
if parameters is None:
parameters = self.parameters
if observations is None:
observations = self.observations
z = self.message_helper.latent_var_sample(
observations=observations,
parameters=parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
return z
def calc_z_prob(self, parameters=None, observations=None, tqdm=None,
**kwargs):
""" Calculate Posterior Marginal over Z """
if parameters is None:
parameters = self.parameters
if observations is None:
observations = self.observations
z_prob = self.message_helper.latent_var_distr(
observations=observations,
parameters=parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
return z_prob
def sample_gibbs(self, tqdm=None):
""" One Step of Blocked Gibbs Sampler
Returns:
            parameters (GaussHMMParameters): sampled parameters after one step
"""
z = self.sample_z(tqdm=tqdm)
new_parameters = self.message_helper.parameters_gibbs_sample(
observations=self.observations,
latent_vars=z,
prior=self.prior,
)
self.parameters = new_parameters
return self.parameters
class SeqGaussHMMSampler(SeqSGMCMCSampler, GaussHMMSampler):
pass
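

# Illustrative sketch (added; not part of the original module): a minimal
# end-to-end Gibbs run. It assumes the package layout shown above (the prior
# and data generator live in the sibling `parameters` module), that
# `GaussHMMPrior.generate_default_prior` and `sample_prior` exist as used in
# `GaussHMMSampler.setup`, and the HMMHelper base-class sampling routines.
def _example_gauss_hmm_gibbs(num_gibbs_iters=5):
    from .parameters import GaussHMMPrior, generate_gausshmm_data
    prior = GaussHMMPrior.generate_default_prior(num_states=2, m=1)
    data = generate_gausshmm_data(T=100, parameters=prior.sample_prior())
    sampler = GaussHMMSampler(num_states=2, m=1,
            observations=data['observations'])
    sampler.init_parameters_from_k_means()   # KMeans-based initialization
    for _ in range(num_gibbs_iters):
        sampler.sample_gibbs()
    return sampler.parameters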
| 7,485 | 34.478673 | 100 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/gauss_hmm/helper.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ..hmm_helper import HMMHelper
from ..._utils import random_categorical
class GaussHMMHelper(HMMHelper):
""" GaussHMM Helper
forward_message (dict) with keys
prob_vector (ndarray) dimension num_states
log_constant (double) log scaling const
backward_message (dict) with keys
likelihood_vector (ndarray) dimension num_states
log_constant (double) log scaling const
y_next (ndarray) y_{t+1}
"""
def __init__(self, num_states, m,
forward_message=None,
backward_message=None,
**kwargs):
self.num_states = num_states
self.m = m
super().__init__(
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
return
def y_sample(self, observations, parameters,
distr='joint', lag=None, num_samples=None,
forward_message=None, backward_message=None,
latent_vars=None, tqdm=None):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
if latent_vars is None:
latent_vars = self.latent_var_sample(
observations=observations,
parameters=parameters,
distr=distr,
lag=lag,
num_samples=num_samples,
forward_message=forward_message,
backward_message=backward_message,
latent_vars=latent_vars,
tqdm=tqdm,
)
mu, R = parameters.mu, parameters.R
L = latent_vars.shape[0]
if num_samples is not None:
y = np.zeros((L, num_samples, self.m))
pbar = range(num_samples)
if tqdm is not None:
pbar = tqdm(pbar)
                pbar.set_description('sample y')
for s in pbar:
z = latent_vars[:,s]
for k in range(self.num_states):
y[z==k,s] = mu[k] + np.random.multivariate_normal(
mean=np.zeros(self.m), cov=R[k], size=np.sum(z==k))
y = np.swapaxes(y, 1, 2)
return y
else:
y = np.zeros((L, self.m))
z = latent_vars
for k in range(self.num_states):
y[z==k] = mu[k] + np.random.multivariate_normal(
mean=np.zeros(self.m), cov=R[k], size=np.sum(z==k))
return y
def calc_gibbs_sufficient_statistic(self, observations, latent_vars,
**kwargs):
""" Gibbs Sample Sufficient Statistics
Args:
observations (ndarray): num_obs observations
latent_vars (ndarray): latent vars
Returns:
sufficient_stat (dict) containing:
                alpha (ndarray) num_states by num_states, pairwise z counts
                S_count (ndarray) num_states, z counts
                S_prevprev (ndarray) num_states, z counts (same as S_count)
                S_curprev (ndarray) num_states by m, per-state y sums
                S_curcur (ndarray) num_states by m by m, per-state yy.T sums
"""
y, z = observations, latent_vars
# Sufficient Statistics for Pi
z_pair_count = np.zeros((self.num_states, self.num_states))
for t in range(1, np.size(z)):
z_pair_count[z[t-1], z[t]] += 1.0
# Sufficient Statistics for mu and R
S_count = np.zeros(self.num_states)
y_sum = np.zeros((self.num_states, self.m))
yy_curcur = np.zeros((self.num_states, self.m, self.m))
for k in range(self.num_states):
S_count[k] = np.sum(z == k)
if S_count[k] == 0:
# No Sufficient Statistics for No Observations
continue
yk = y[z==k, :]
# Sufficient Statistics for group k
y_sum[k] = np.sum(yk, axis=0)
yy_curcur[k] = np.dot(yk.T, yk)
# Return sufficient Statistics
sufficient_stat = {}
sufficient_stat['pi'] = dict(alpha = z_pair_count)
sufficient_stat['mu'] = dict(S_prevprev = S_count, S_curprev = y_sum)
sufficient_stat['R'] = dict(
S_count=S_count,
S_prevprev = S_count,
S_curprev = y_sum,
S_curcur=yy_curcur,
)
return sufficient_stat
def _emission_loglikelihoods(self, y_cur, parameters):
# y_cur should be m,
# mu is num_states by m
# LRinv is num_states by m by m
loglikelihoods = np.zeros(self.num_states, dtype=float)
for k, (mu_k, LRinv_k) in enumerate(
zip(parameters.mu, parameters.LRinv)):
delta = y_cur - mu_k
LRinvTdelta = np.dot(delta, LRinv_k)
loglikelihoods[k] = \
-0.5 * np.dot(LRinvTdelta, LRinvTdelta) + \
-0.5 * self.m * np.log(2*np.pi) + \
np.sum(np.log(np.diag(LRinv_k)))
return loglikelihoods
def _emission_loglikelihood(self, y_cur, z_cur, parameters):
mu_k, LRinv_k = parameters.mu[z_cur], parameters.LRinv[z_cur]
delta = y_cur - mu_k
LRinvTdelta = np.dot(delta, LRinv_k)
loglikelihood = \
-0.5 * np.dot(LRinvTdelta, LRinvTdelta) + \
-0.5 * self.m * np.log(2*np.pi) + \
np.sum(np.log(np.diag(LRinv_k)))
return loglikelihood
def gradient_marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None,
weights=None, use_scir=False, tqdm=None):
# Forward Pass
forward_messages = self.forward_pass(observations, parameters,
forward_message, include_init_message=True)
# Backward Pass
backward_messages = self.backward_pass(observations, parameters,
backward_message, include_init_message=True)
# Gradients
grad = {var: np.zeros_like(value)
for var, value in parameters.as_dict().items()}
Pi, expanded_pi = parameters.pi, parameters.expanded_pi
mu = parameters.mu
LRinv, Rinv, R = parameters.LRinv, parameters.Rinv, parameters.R
pbar = enumerate(zip(forward_messages[:-1], backward_messages[1:]))
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("gradient loglike")
for t, (forward_t, backward_t) in pbar:
# r_t is Pr(z_{t-1} | y_{< t})
# s_t is Pr(z_t | y_{< t})
# q_t is Pr(y_{> t} | z_t)
r_t = forward_t['prob_vector']
s_t = np.dot(r_t, Pi)
q_t = backward_t['likelihood_vector']
weight_t = 1.0 if weights is None else weights[t]
# Calculate P_t = Pr(y_t | z_t)
y_cur = observations[t]
P_t, _ = self._likelihoods(
y_cur=y_cur,
parameters=parameters
)
# Marginal + Pairwise Marginal
joint_post = np.diag(r_t).dot(Pi).dot(np.diag(P_t*q_t))
joint_post = joint_post/np.sum(joint_post)
marg_post = np.sum(joint_post, axis=0)
            if use_scir:
                # Sufficient statistics for Stochastic Cox-Ingersoll-Ross,
                # accumulated over the subsequence like the other gradients
                if parameters.pi_type == 'logit':
                    grad['logit_pi'] += weight_t * joint_post
                elif parameters.pi_type == 'expanded':
                    grad['expanded_pi'] += weight_t * joint_post
                else:
                    raise RuntimeError(
                        "Unrecognized pi_type: {0}".format(parameters.pi_type))
else:
# Grad for pi
if parameters.pi_type == "logit":
# Gradient of logit_pi
grad['logit_pi'] += weight_t * (joint_post - \
np.diag(np.sum(joint_post, axis=1)).dot(Pi))
elif parameters.pi_type == "expanded":
grad['expanded_pi'] += weight_t * np.array([
(expanded_pi[k]**-1)*(
joint_post[k] - np.sum(joint_post[k])*Pi[k])
for k in range(self.num_states)
])
else:
                    raise RuntimeError(
                        "Unrecognized pi_type: {0}".format(parameters.pi_type))
# grad for mu and LRinv
for k, mu_k, LRinv_k, Rinv_k, R_k in zip(
range(self.num_states), mu, LRinv, Rinv, R):
diff_k = y_cur - mu_k
grad['mu'][k] += weight_t * Rinv_k.dot(diff_k) * marg_post[k]
grad_LRinv_k = weight_t * (
(R_k - np.outer(diff_k, diff_k)).dot(LRinv_k)
) * marg_post[k]
grad['LRinv_vec'][k] += grad_LRinv_k[np.tril_indices(self.m)]
return grad
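

# Illustrative sketch (added; not part of the original module): verifies the
# Cholesky-based emission log-likelihood formula used above against a direct
# Gaussian density evaluation. Self-contained; no helper instance needed.
def _example_emission_loglikelihood_check():
    rng = np.random.RandomState(0)
    m = 3
    mu = rng.randn(m)
    A_ = rng.randn(m, m)
    R = A_.dot(A_.T) + m * np.eye(m)           # a valid covariance
    LRinv = np.linalg.cholesky(np.linalg.inv(R))
    y = rng.randn(m)
    delta = y - mu
    LRinvTdelta = delta.dot(LRinv)             # equals LRinv.T.dot(delta)
    loglike = (-0.5 * LRinvTdelta.dot(LRinvTdelta)
               - 0.5 * m * np.log(2 * np.pi)
               + np.sum(np.log(np.diag(LRinv))))
    direct = (-0.5 * delta.dot(np.linalg.solve(R, delta))
              - 0.5 * m * np.log(2 * np.pi)
              - 0.5 * np.linalg.slogdet(R)[1])
    assert np.allclose(loglike, direct)
    return loglike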
| 8,999 | 37.961039 | 78 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/gauss_hmm/parameters.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...base_parameters import (
BaseParameters, BasePrior, BasePreconditioner,
)
from ...variables import (
TransitionMatrixParamHelper, TransitionMatrixPriorHelper,
TransitionMatrixPrecondHelper,
VectorsParamHelper, VectorsPriorHelper,
VectorsPrecondHelper,
CovariancesParamHelper, CovariancesPriorHelper,
CovariancesPrecondHelper,
)
class GaussHMMParameters(BaseParameters):
""" Gaussian HMM Parameters """
_param_helper_list = [
TransitionMatrixParamHelper(name='pi', dim_names=['num_states', 'pi_type']),
VectorsParamHelper(name='mu', dim_names=['m', 'num_states']),
CovariancesParamHelper(name='R', dim_names=['m', 'num_states']),
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "GaussHMMParameters:"
my_str += "\npi:\n" + str(self.pi)
my_str += "\npi_type: `" + str(self.pi_type) + "`"
my_str += "\nmu:\n" + str(self.mu)
my_str += "\nR:\n" + str(self.R)
return my_str
class GaussHMMPrior(BasePrior):
""" Gaussian HMM Prior
See individual Prior Mixins for details
"""
_Parameters = GaussHMMParameters
_prior_helper_list = [
CovariancesPriorHelper(name='R', dim_names=['m', 'num_states'], matrix_name='mu'),
TransitionMatrixPriorHelper(name='pi', dim_names=['num_states']),
VectorsPriorHelper(name='mu', dim_names=['m', 'num_states'],
var_row_name='R'),
]
class GaussHMMPreconditioner(BasePreconditioner):
""" Gaussian HMM Preconditioner
See individual Precondition Mixins for details
"""
_precond_helper_list = [
TransitionMatrixPrecondHelper(name='pi', dim_names=['num_states']),
VectorsPrecondHelper(name='mu', dim_names=['m', 'num_states'], var_row_name='R'),
CovariancesPrecondHelper(name='R', dim_names=['m', 'num_states']),
]
def generate_gausshmm_data(T, parameters, initial_message = None,
tqdm=None):
""" Helper function for generating Gaussian HMM time series
Args:
T (int): length of series
parameters (GAUSSHMMParameters): parameters
        initial_message (dict): prior message for the initial latent state
Returns:
data (dict): dictionary containing:
observations (ndarray): T by m
latent_vars (ndarray): T takes values in {1,...,num_states}
parameters (GaussHMMParameters)
init_message (ndarray)
"""
from ..._utils import random_categorical
k, m = np.shape(parameters.mu)
mu = parameters.mu
R = parameters.R
Pi = parameters.pi
if initial_message is None:
initial_message = {
'prob_vector': np.ones(k)/k,
'log_constant': 0.0,
}
latent_vars = np.zeros((T), dtype=int)
obs_vars = np.zeros((T, m))
latent_prev = random_categorical(initial_message['prob_vector'])
pbar = range(T)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("generating data")
for t in pbar:
latent_vars[t] = random_categorical(Pi[latent_prev])
mu_k = mu[latent_vars[t]]
R_k = R[latent_vars[t]]
obs_vars[t] = np.random.multivariate_normal(mean=mu_k, cov = R_k)
latent_prev = latent_vars[t]
data = dict(
observations=obs_vars,
latent_vars=latent_vars,
parameters=parameters,
initial_message=initial_message,
)
return data
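

# Illustrative sketch (added; not part of the original module): draws a short
# series from a randomly sampled two-state model. Assumes
# `GaussHMMPrior.generate_default_prior` and `sample_prior` exist, as used by
# the sampler module.
def _example_generate_gausshmm_data(T=50):
    prior = GaussHMMPrior.generate_default_prior(num_states=2, m=1)
    parameters = prior.sample_prior()
    data = generate_gausshmm_data(T=T, parameters=parameters)
    # data['observations'] has shape (T, 1); data['latent_vars'] shape (T,)
    return data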
| 3,793 | 33.490909 | 94 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/gauss_hmm/__init__.py | from .parameters import (
GaussHMMParameters,
GaussHMMPrior,
GaussHMMPreconditioner,
generate_gausshmm_data,
)
from .helper import GaussHMMHelper
from .sampler import GaussHMMSampler, SeqGaussHMMSampler
| 223 | 23.888889 | 56 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/lgssm/kernels.py | import numpy as np
from scipy.special import expit, logsumexp
from ...particle_filters.kernels import LatentGaussianKernel
# LGSSMKernels:
# Prior Kernel
class LGSSMPriorKernel(LatentGaussianKernel):
""" Prior Kernel for LGSSM
K(x_{t+1} | x_t) = Pr(x_{t+1} | x_t, parameters)
"""
def rv(self, x_t, **kwargs):
""" Prior Kernel for LGSSM
Sample x_{t+1} ~ Pr(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
x_next_mean = x_t.dot(self.parameters.A.T)
x_next = np.linalg.solve(self.parameters.LQinv.T,
np.random.normal(size=x_t.shape).T).T + x_next_mean
return x_next
else:
# n = 1, x is scalar
x_next_mean = x_t * self.parameters.A
x_next = self.parameters.LQinv**-1 * np.random.normal(
size=x_t.shape) + x_next_mean
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Prior Kernel for LGSSM
weight_t = Pr(y_{t+1} | x_{t+1}, parameters)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
diff = np.outer(self.y_next, np.ones(N)) - \
np.dot(self.parameters.C, x_next.T)
log_weights = \
-0.5*np.shape(self.parameters.LRinv)[0]*np.log(2.0*np.pi) + \
-0.5*np.sum(diff*np.dot(self.parameters.Rinv, diff), axis=0) + \
np.sum(np.log(np.diag(self.parameters.LRinv)))
else:
# n = 1, x is scalar
diff = self.y_next - self.parameters.C*x_next
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*(diff**2)*self.parameters.Rinv + \
np.log(self.parameters.LRinv)
log_weights = np.reshape(log_weights, (N))
return log_weights
# "Optimal" instrumental Kernel
class LGSSMOptimalKernel(LatentGaussianKernel):
""" Optimal Instrumental Kernel for LGSSM
K(x_{t+1} | x_t) = Pr(x_{t+1} | x_t, y_{t+1}, parameters)
"""
def rv(self, x_t, **kwargs):
""" optimal Kernel for LGSSM
Sample x_{t+1} ~ Pr(x_{t+1} | x_t, y_{t+1}, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
raise NotImplementedError()
else:
# n = 1, x is scalar
x_next_mean_precision = \
x_t * self.parameters.A * self.parameters.Qinv + \
self.y_next * self.parameters.C * self.parameters.Rinv
x_next_precision = \
self.parameters.Qinv + \
(self.parameters.C**2)*self.parameters.Rinv
x_next = (x_next_precision)**-0.5 * np.random.normal(
size=x_t.shape) + \
x_next_mean_precision/x_next_precision
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Optimal Kernel for LGSSM
            weight_t = Pr(y_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
raise NotImplementedError()
else:
# n = 1, x is scalar
            # predictive moments of y_{t+1} given x_t: mean C*A*x_t,
            # variance C^2*Q + R
            diff = self.y_next - self.parameters.C*self.parameters.A*x_t
            variance = (self.parameters.C**2)*self.parameters.Qinv**-1 + \
                self.parameters.Rinv**-1
log_weights = -0.5*(diff)**2 / variance - \
0.5*np.log(2.0*np.pi) - 0.5*np.log(variance)
log_weights = np.reshape(log_weights, (N))
return log_weights
# "Optimal" instrumental Kernel for high dimensional latent variables
class LGSSMHighDimOptimalKernel(LatentGaussianKernel):
def set_parameters(self, parameters):
self.parameters = parameters
self._param = dict(
AtQinv = np.dot(parameters.A.T, parameters.Qinv),
CtRinv = np.dot(parameters.C.T, parameters.Rinv),
CtRinvC = np.dot(parameters.C.T,
np.dot(parameters.Rinv, parameters.C)
),
)
x_next_Lvar = np.linalg.inv(np.linalg.cholesky(
parameters.Qinv + self._param['CtRinvC']
)).T
self._param['x_next_Lvar'] = x_next_Lvar
self._param['x_next_var'] = np.dot(x_next_Lvar, x_next_Lvar.T)
self._param['predictive_var'] = (self.parameters.R + np.dot(
self.parameters.C,
np.dot(self.parameters.Q, self.parameters.C.T),
))
self._param['predictive_var_logdet'] = np.linalg.slogdet(
self._param['predictive_var'])[1]
return
def rv(self, x_t, **kwargs):
""" optimal Kernel for LGSSM
Sample x_{t+1} ~ Pr(x_{t+1} | x_t, y_{t+1}, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
x_next_mean = np.dot(
np.dot(x_t, self._param['AtQinv']) +
np.outer(
np.ones(np.shape(x_t)[0]),
np.dot(self._param['CtRinv'], self.y_next),
),
self._param['x_next_var']
)
x_next = np.dot(
np.random.normal(size=x_t.shape),
self._param['x_next_Lvar'].T,
) + x_next_mean
return x_next
else:
raise NotImplementedError()
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Optimal Kernel for LGSSM
            weight_t = Pr(y_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
            # predictive mean of y_{t+1} given x_t is C A x_t
            diff = \
                np.outer(np.ones(N), self.y_next) - \
                np.dot(x_t, np.dot(self.parameters.C, self.parameters.A).T)
log_weights = (
-0.5*np.sum(diff *
np.linalg.solve(self._param['predictive_var'], diff.T).T,
axis=1) +\
                -0.5*np.shape(diff)[1]*np.log(2.0*np.pi) + \
-0.5*self._param['predictive_var_logdet']
)
return log_weights
else:
# n = 1, x is scalar
raise NotImplementedError()
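

# Illustrative sketch (added; not part of the original module): one bootstrap
# step with LGSSMPriorKernel for a scalar LGSSM. The kernel's constructor and
# the wiring of `parameters`/`y_next` live in the LatentGaussianKernel base
# class outside this file, so this sketch bypasses __init__ and assigns the
# attributes the methods above actually read; the parameter object is a
# hand-made stand-in with only the fields used.
def _example_bootstrap_step(N=100):
    class _FakeParams(object):
        A, C = 0.9, 1.0
        LQinv = LRinv = 1.0          # unit noise, so Qinv = Rinv = 1.0
        Qinv = Rinv = 1.0
    kernel = LGSSMPriorKernel.__new__(LGSSMPriorKernel)
    kernel.parameters = _FakeParams()
    kernel.y_next = 0.5
    x_t = np.random.normal(size=(N, 1))
    x_next = kernel.rv(x_t)                  # propagate through the prior
    log_w = kernel.reweight(x_t, x_next)     # weight by Pr(y_next | x_next)
    w = np.exp(log_w - logsumexp(log_w))     # self-normalized weights
    return x_next, w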
# EOF
| 7,345 | 34.487923 | 82 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/lgssm/sampler.py | import numpy as np
from ...sgmcmc_sampler import SGMCMCSampler, SeqSGMCMCSampler
from .parameters import LGSSMPrior, LGSSMPreconditioner
from .helper import LGSSMHelper
class LGSSMSampler(SGMCMCSampler):
def __init__(self, n, m, observations=None, prior=None, parameters=None,
forward_message=None, backward_message=None, name="LGSSMSampler",
**kwargs):
self.options = kwargs
self.n = n
self.m = m
self.name = name
self.setup(
observations=observations,
prior=prior,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
)
return
def setup(self, observations=None, prior=None, parameters=None,
forward_message=None, backward_message=None):
self.observations = observations
if prior is None:
prior = LGSSMPrior.generate_default_prior(n=self.n, m=self.m)
self.prior = prior
if parameters is None:
self.parameters = self.prior.sample_prior()
else:
self.parameters = parameters
if forward_message is None:
forward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
}
self.forward_message = forward_message
if backward_message is None:
backward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
}
self.backward_message = backward_message
self.message_helper=LGSSMHelper(
n=self.n,
m=self.m,
forward_message=forward_message,
backward_message=backward_message,
)
return
def _check_observation_shape(self, observations):
if observations is None:
return
if np.shape(observations)[1] != self.m:
raise ValueError("observations second dimension does not match m")
return
def _get_preconditioner(self, preconditioner=None):
if preconditioner is None:
preconditioner = LGSSMPreconditioner()
return preconditioner
def sample_x(self, observations=None, parameters=None, tqdm=None,
num_samples=None, **kwargs):
""" Sample X """
return self.predict(target='latent', kind='analytic', return_distr=False,
observations=observations, parameters=parameters,
num_samples=num_samples, tqdm=tqdm,
**kwargs,
)
def sample_gibbs(self, parameters=None, observations=None, tqdm=None):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (LGSSMParameters): sampled parameters after one step
"""
if parameters is None:
parameters = self.parameters
observations = self._get_observations(observations)
x = self.sample_x(parameters=parameters, observations=observations,
tqdm=tqdm)
new_parameters = self.message_helper.parameters_gibbs_sample(
observations=observations,
latent_vars=x,
prior=self.prior,
)
self.parameters = new_parameters
return self.parameters
class SeqLGSSMSampler(SeqSGMCMCSampler, LGSSMSampler):
pass
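

# Illustrative sketch (added; not part of the original module): fit a small
# LGSSM by Gibbs sampling on synthetic data produced by the helper's
# `simulate`. Assumes only the constructor and methods defined above, plus
# the base-class `predict` that `sample_x` delegates to.
def _example_lgssm_gibbs(T=100, num_gibbs_iters=5):
    sampler = LGSSMSampler(n=1, m=1)         # parameters drawn from the prior
    data = sampler.message_helper.simulate(
            T=T, parameters=sampler.parameters, include_init=False)
    sampler.setup(observations=data['observations'],
            prior=sampler.prior, parameters=sampler.parameters)
    for _ in range(num_gibbs_iters):
        sampler.sample_gibbs()
    return sampler.parameters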
| 3,592 | 34.574257 | 81 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/lgssm/helper.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCHelper
from .kernels import (
LGSSMPriorKernel,
LGSSMOptimalKernel,
LGSSMHighDimOptimalKernel,
)
from ...particle_filters.buffered_smoother import (
buffered_pf_wrapper,
average_statistic,
)
from ..._utils import lower_tri_mat_inv
class LGSSMHelper(SGMCMCHelper):
""" LGSSM Helper
forward_message (dict) with keys
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
backward_message (dict) with keys
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
"""
def __init__(self, n, m, forward_message=None, backward_message=None,
**kwargs):
self.n = n
self.m = m
if forward_message is None:
forward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
}
self.default_forward_message=forward_message
if backward_message is None:
backward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
}
self.default_backward_message=backward_message
return
## Message Passing
def _forward_messages(self, observations, parameters, forward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of forward messages Pr(x_{t} | y_{<=t})
# y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
forward_messages = [None]*(num_obs+1)
forward_messages[0] = forward_message
mean_precision = forward_message['mean_precision']
precision = forward_message['precision']
log_constant = forward_message['log_constant']
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.dot(LQinv, LQinv.T)
AtQinv = np.dot(A.T, Qinv)
AtQinvA = np.dot(AtQinv, A)
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = range(num_obs)
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description("forward messages")
for t in pbar:
y_cur = observations[t]
weight_t = 1.0 if weights is None else weights[t]
# Calculate Predict Parameters
J = np.linalg.solve(AtQinvA + precision, AtQinv)
pred_mean_precision = np.dot(J.T, mean_precision)
pred_precision = Qinv - np.dot(AtQinv.T, J)
# Calculate Observation Parameters
y_mean = np.dot(C,
np.linalg.solve(pred_precision, pred_mean_precision))
y_precision = Rinv - np.dot(CtRinv.T,
np.linalg.solve(CtRinvC + pred_precision, CtRinv))
log_c = (-0.5 * np.dot(y_cur-y_mean,
np.dot(y_precision, y_cur-y_mean)) + \
0.5 * np.linalg.slogdet(y_precision)[1] + \
-0.5 * self.m * np.log(2*np.pi))
log_constant += log_c * weight_t
# Calculate Filtered Parameters
new_mean_precision = pred_mean_precision + np.dot(CtRinv, y_cur)
new_precision = pred_precision + CtRinvC
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
if not only_return_last:
forward_messages[t+1] = {
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}
if only_return_last:
last_message = {
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}
return last_message
else:
return forward_messages
def _backward_messages(self, observations, parameters, backward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of backward messages Pr(y_{>t} | x_t)
        # y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
backward_messages = [None]*(num_obs+1)
backward_messages[-1] = backward_message
mean_precision = backward_message['mean_precision']
precision = backward_message['precision']
log_constant = backward_message['log_constant']
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.dot(LQinv, LQinv.T)
AtQinv = np.dot(A.T, Qinv)
AtQinvA = np.dot(AtQinv, A)
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = reversed(range(num_obs))
if tqdm is not None:
pbar = tqdm(pbar, total=num_obs, leave=False)
pbar.set_description("backward messages")
for t in pbar:
y_cur = observations[t]
weight_t = 1.0 if weights is None else weights[t]
# Helper Values
xi = Qinv + precision + CtRinvC
L = np.linalg.solve(xi, AtQinv.T)
vi = mean_precision + np.dot(CtRinv, y_cur)
# Calculate new parameters
log_c = (-0.5 * self.m * np.log(2.0*np.pi) + \
np.sum(np.log(np.diag(LRinv))) + \
np.sum(np.log(np.diag(LQinv))) + \
-0.5 * np.linalg.slogdet(xi)[1] + \
-0.5 * np.dot(y_cur, np.dot(Rinv, y_cur)) + \
0.5 * np.dot(vi, np.linalg.solve(xi, vi)))
log_constant += log_c * weight_t
new_mean_precision = np.dot(L.T, vi)
new_precision = AtQinvA - np.dot(AtQinv, L)
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
if not only_return_last:
backward_messages[t] = {
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}
if only_return_last:
last_message = {
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}
return last_message
else:
return backward_messages
## Loglikelihood Functions
def marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None, **kwargs):
# Run forward pass + combine with backward pass
        # y is num_obs x m matrix
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# forward_pass is Pr(x_{T-1} | y_{<=T-1})
forward_message = self._forward_message(
observations=observations,
parameters=parameters,
forward_message=forward_message,
weights=weights,
tqdm=tqdm,
**kwargs)
# Calculate the marginal loglikelihood of forward + backward message
f_mean_precision = forward_message['mean_precision']
f_precision = forward_message['precision']
c_mean_precision = f_mean_precision + backward_message['mean_precision']
c_precision = f_precision + backward_message['precision']
weight = 1.0 if weights is None else weights[-1]
loglikelihood = forward_message['log_constant'] + \
(backward_message['log_constant'] + \
+0.5 * np.linalg.slogdet(f_precision)[1] + \
-0.5 * np.linalg.slogdet(c_precision)[1] + \
-0.5 * np.dot(f_mean_precision,
np.linalg.solve(f_precision, f_mean_precision)
) + \
0.5 * np.dot(c_mean_precision,
np.linalg.solve(c_precision, c_mean_precision)
)
) * weight
return loglikelihood
def complete_data_loglikelihood(self, observations, latent_vars, parameters,
forward_message=None, weights=None, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
log_constant = 0.0
A = parameters.A
LQinv = parameters.LQinv
C = parameters.C
LRinv = parameters.LRinv
x_prev = forward_message.get('x_prev')
for t, (y_t, x_t) in enumerate(zip(observations, latent_vars)):
weight_t = 1.0 if weights is None else weights[t]
# Pr(X_t | X_t-1)
if (x_prev is not None):
diffLQinv = np.dot(x_t - np.dot(A,x_prev), LQinv)
log_c = (-0.5 * self.n * np.log(2*np.pi) + \
-0.5 * np.dot(diffLQinv, diffLQinv) + \
np.sum(np.log(np.diag(LQinv))))
log_constant += log_c * weight_t
# Pr(Y_t | X_t)
LRinvTymCx = np.dot(LRinv.T, y_t - np.dot(C, x_t))
log_c = (-0.5 * self.m * np.log(2*np.pi) + \
-0.5*np.dot(LRinvTymCx, LRinvTymCx) + \
np.sum(np.log(np.diag(LRinv))))
log_constant += log_c * weight_t
x_prev = x_t
return log_constant
def predictive_loglikelihood(self, observations, parameters, lag=1,
forward_message=None, backward_message=None, tqdm=None, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# Calculate Filtered
if lag == 0:
forward_messages = self.forward_pass(observations,
parameters, forward_message, tqdm=tqdm, **kwargs)
else:
forward_messages = self.forward_pass(observations[0:-lag],
parameters, forward_message, tqdm=tqdm, **kwargs)
loglike = 0.0
A = parameters.A
Q = parameters.Q
C = parameters.C
R = parameters.R
pbar = range(lag, np.shape(observations)[0])
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description('predictive_loglikelihood')
for t in pbar:
# Calculate Pr(x_t | y_{<=t-lag}, theta)
mean_precision = forward_messages[t-lag]['mean_precision']
precision = forward_messages[t-lag]['precision']
mean = np.linalg.solve(precision, mean_precision)
var = np.linalg.inv(precision)
for l in range(lag):
mean = np.dot(A, mean)
var = np.dot(A, np.dot(var, A.T)) + Q
y_mean = np.dot(C, mean)
y_var = np.dot(C, np.dot(var, C.T)) + R
y_cur = observations[t]
log_like_t = -0.5 * np.dot(y_cur - y_mean,
np.linalg.solve(y_var, y_cur - y_mean)) + \
-0.5 * np.linalg.slogdet(y_var)[1] + \
-0.5 * self.m * np.log(2*np.pi)
loglike += log_like_t
return loglike
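    # Illustrative note (added): lag=1 above scores the usual one-step-ahead
    # predictive Pr(y_t | y_{<t}); larger lags roll the filtered moments
    # forward through mean' = A mean, var' = A var A.T + Q before projecting
    # through C.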
## Gradient Functions
def gradient_marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, weights=None,
include_init=True, tqdm=None):
A, LQinv, C, LRinv = \
parameters.A, parameters.LQinv, parameters.C, parameters.LRinv
# Forward Pass
# forward_messages = [Pr(x_{t} | y_{-inf:t}), y{t}] for t = -1,...,T-1
forward_messages = self.forward_pass(observations,
parameters, forward_message,
include_init_message=True)
# Backward Pass
# backward_messages = [Pr(y_{t+1:inf} | x_{t}), Y_{t}] for t =-1,...,T-1
backward_messages = self.backward_pass(observations,
parameters, backward_message,
include_init_message=True)
# Gradients
A_grad = np.zeros_like(A)
C_grad = np.zeros_like(C)
LQinv_grad = np.zeros_like(LQinv)
LRinv_grad = np.zeros_like(LRinv)
# Helper Constants
Rinv = np.dot(LRinv, LRinv.T)
RinvC = np.dot(Rinv, C)
CtRinvC = np.dot(C.T, RinvC)
LRinv_diaginv = np.diag(np.diag(LRinv)**-1)
Qinv = np.dot(LQinv, LQinv.T)
QinvA = np.dot(Qinv, A)
AtQinvA = np.dot(A.T, QinvA)
LQinv_diaginv = np.diag(np.diag(LQinv)**-1)
# Emission Gradients
p_bar = zip(forward_messages[1:], backward_messages[1:], observations)
if tqdm is not None:
p_bar = tqdm(p_bar, total=np.shape(observations)[0], leave=False)
p_bar.set_description("gradient loglike")
for t, (forward_t, backward_t, y_t) in enumerate(p_bar):
weight_t = 1.0 if weights is None else weights[t]
# Pr(x_t | y)
c_mean_precision = \
forward_t['mean_precision'] + backward_t['mean_precision']
c_precision = \
forward_t['precision'] + backward_t['precision']
x_mean = np.linalg.solve(c_precision, c_mean_precision)
xxt_mean = np.linalg.inv(c_precision) + np.outer(x_mean, x_mean)
# Gradient of C
C_grad += weight_t * (np.outer(np.dot(Rinv, y_t), x_mean) + \
-1.0 * np.dot(RinvC, xxt_mean))
# Gradient of LRinv
Cxyt = np.outer(np.dot(C, x_mean), y_t)
CxxtCt = np.dot(C, np.dot(xxt_mean, C.T))
LRinv_grad += weight_t * (LRinv_diaginv + \
-1.0*np.dot(np.outer(y_t, y_t) - Cxyt - Cxyt.T + CxxtCt, LRinv)
)
# Transition Gradients
if include_init:
pbar = zip(
forward_messages[:-1], backward_messages[1:], observations)
else:
pbar = zip(
forward_messages[1:-1], backward_messages[2:], observations[1:])
for t, (forward_t, backward_t, y_t) in enumerate(pbar):
weight_t = 1.0 if weights is None else weights[t]
# Pr(x_t, x_t+1 | y)
c_mean_precision = \
np.concatenate([
forward_t['mean_precision'],
backward_t['mean_precision'] + np.dot(RinvC.T,y_t)
])
c_precision = \
np.block([
[forward_t['precision'] + AtQinvA, -QinvA.T],
[-QinvA, backward_t['precision'] + CtRinvC + Qinv]
])
c_mean = np.linalg.solve(c_precision, c_mean_precision)
c_cov = np.linalg.inv(c_precision)
xp_mean = c_mean[0:self.n]
xn_mean = c_mean[self.n:]
xpxpt_mean = c_cov[0:self.n, 0:self.n] + np.outer(xp_mean, xp_mean)
xnxpt_mean = c_cov[self.n:, 0:self.n] + np.outer(xn_mean, xp_mean)
xnxnt_mean = c_cov[self.n:, self.n:] + np.outer(xn_mean, xn_mean)
# Gradient of A
A_grad += weight_t * np.dot(Qinv, xnxpt_mean - np.dot(A,xpxpt_mean))
# Gradient of LQinv
Axpxnt = np.dot(A, xnxpt_mean.T)
AxpxptAt = np.dot(A, np.dot(xpxpt_mean, A.T))
LQinv_grad += weight_t * (LQinv_diaginv + \
-1.0*np.dot(xnxnt_mean - Axpxnt - Axpxnt.T + AxpxptAt, LQinv))
grad = dict(
A=A_grad,
LQinv_vec=LQinv_grad[np.tril_indices_from(LQinv_grad)],
C=C_grad,
LRinv_vec=LRinv_grad[np.tril_indices_from(LRinv_grad)],
)
return grad
def gradient_complete_data_loglikelihood(self, observations, latent_vars,
parameters, forward_message=None, weights=None, tqdm=None,
**kwargs):
if forward_message is None:
forward_message = self.default_forward_message
A = parameters.A
LQinv = parameters.LQinv
Qinv = parameters.Qinv
LQinv_Tinv = lower_tri_mat_inv(LQinv).T
C = parameters.C
LRinv = parameters.LRinv
Rinv = parameters.Rinv
LRinv_Tinv = lower_tri_mat_inv(LRinv).T
# Gradients
        grad = {var: np.zeros_like(value)
                for var, value in parameters.as_dict().items()}
        # accumulate the full lower-triangular matrices here; they are
        # converted to the vectorized keys (LQinv_vec / LRinv_vec) at the end
        grad['LQinv'] = np.zeros_like(LQinv)
        grad['LRinv'] = np.zeros_like(LRinv)
if len(np.shape(latent_vars)) == 2:
# Only One Sample
# Transition Gradients
x_prev = forward_message.get('x_prev')
for t, x_t in enumerate(latent_vars):
weight_t = 1.0 if weights is None else weights[t]
if x_prev is not None:
diff = x_t - np.dot(A, x_prev)
grad['A'] += weight_t * np.outer(np.dot(Qinv, diff), x_prev)
grad['LQinv'] += weight_t * (LQinv_Tinv + \
-1.0*np.dot(np.outer(diff, diff), LQinv))
x_prev = x_t
# Emission Gradients
for t, (x_t, y_t) in enumerate(zip(latent_vars, observations)):
weight_t = 1.0 if weights is None else weights[t]
diff = y_t - np.dot(C, x_t)
grad['C'] += weight_t * np.outer(np.dot(Rinv, diff), x_t)
grad['LRinv'] += weight_t * (LRinv_Tinv + \
-1.0*np.dot(np.outer(diff, diff), LRinv))
elif len(np.shape(latent_vars)) == 3:
# Average over Multiple Latent Vars
num_samples = np.shape(latent_vars)[2]
# Transition Gradients
x_prev = forward_message.get('x_prev')
for t, x_t in enumerate(latent_vars):
weight_t = 1.0 if weights is None else weights[t]
if x_prev is not None:
diff = x_t - np.dot(A, x_prev)
grad['A'] += weight_t * np.dot(Qinv,
np.dot(diff, x_prev.T))/num_samples
grad['LQinv'] += weight_t * (LQinv_Tinv + \
-1.0*np.dot(np.dot(diff, diff.T), LQinv)/num_samples)
x_prev = x_t
# Emission Gradients
for t, (x_t, y_t_) in enumerate(zip(latent_vars, observations)):
y_t = np.array([y_t_ for _ in range(num_samples)]).T
weight_t = 1.0 if weights is None else weights[t]
diff = y_t - np.dot(C, x_t)
grad['C'] += weight_t * (np.dot(Rinv,
np.dot(diff, x_t.T))/num_samples)
grad['LRinv'] += weight_t * (LRinv_Tinv + \
-1.0*np.dot(np.dot(diff, diff.T), LRinv)/num_samples)
else:
raise ValueError("Incorrect latent_var shape")
grad['LQinv_vec'] = grad.pop('LQinv')[np.tril_indices(self.n)]
grad['LRinv_vec'] = grad.pop('LRinv')[np.tril_indices(self.m)]
return grad
def gradient_loglikelihood(self, kind='marginal', **kwargs):
if kind == 'marginal':
return self.gradient_marginal_loglikelihood(**kwargs)
elif kind == 'complete':
return self.gradient_complete_data_loglikelihood(**kwargs)
else:
raise ValueError("Unrecognized `kind' {0}".format(kind))
## Gibbs Functions
def calc_gibbs_sufficient_statistic(self, observations, latent_vars,
**kwargs):
""" Gibbs Sample Sufficient Statistics
Args:
observations (ndarray): num_obs observations
latent_vars (ndarray): latent vars
Returns:
sufficient_stat (dict)
"""
x = latent_vars
y = observations
# Sufficient Statistics for A and Q
# From Emily Fox's Thesis Page 147
PsiT = x[1:]
PsiT_prev = x[:-1]
transition_count = len(PsiT)
Sx_prevprev = PsiT_prev.T.dot(PsiT_prev)
Sx_curprev = PsiT.T.dot(PsiT_prev)
Sx_curcur = PsiT.T.dot(PsiT)
# Sufficient Statistics for C and R
# From Emily Fox's Thesis Page 147
PsiT = y
PsiT_prev = x
emission_count = len(PsiT)
S_prevprev = PsiT_prev.T.dot(PsiT_prev)
S_curprev = PsiT.T.dot(PsiT_prev)
S_curcur = PsiT.T.dot(PsiT)
# Return sufficient Statistics
sufficient_stat = {}
sufficient_stat['A'] = dict(
S_prevprev=Sx_prevprev,
S_curprev=Sx_curprev,
)
sufficient_stat['Q'] = dict(
S_count=transition_count,
S_prevprev=Sx_prevprev,
S_curprev=Sx_curprev,
S_curcur=Sx_curcur,
)
sufficient_stat['R'] = dict(
S_count=emission_count,
S_prevprev=S_prevprev,
S_curprev=S_curprev,
S_curcur=S_curcur,
)
sufficient_stat['C'] = dict(
S_prevprev=S_prevprev,
S_curprev=S_curprev,
)
return sufficient_stat
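    # Illustrative note (added): a quick shape check for the statistics above,
    # assuming a helper with latent dim n and observation dim m:
    #   x = np.random.randn(T, n); y = np.random.randn(T, m)
    #   stats = helper.calc_gibbs_sufficient_statistic(y, x)
    #   stats['A']['S_prevprev'].shape == (n, n)
    #   stats['C']['S_curprev'].shape  == (m, n)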
## Predict Functions
def latent_var_distr(self, observations, parameters,
distr='marginal', lag=None,
forward_message=None, backward_message=None,
tqdm=None):
if distr != 'marginal':
raise NotImplementedError()
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# Setup Output
L = np.shape(observations)[0]
mean = np.zeros((L, self.n))
cov = np.zeros((L, self.n, self.n))
# Forward Pass
forward_messages = self.forward_pass(
observations=observations,
parameters=parameters,
forward_message=forward_message,
tqdm=tqdm
)
pbar = range(L)
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description('calc latent var distr')
if lag is None:
# Smoothing
backward_messages = self.backward_pass(
observations=observations,
parameters=parameters,
backward_message=backward_message,
tqdm=tqdm
)
for t in pbar:
mean_precision = \
forward_messages[t]['mean_precision'] + \
backward_messages[t]['mean_precision']
precision = \
forward_messages[t]['precision'] + \
backward_messages[t]['precision']
mean[t] = np.linalg.solve(precision, mean_precision)
cov[t] = np.linalg.inv(precision)
return mean, cov
elif lag <= 0:
# Prediction/Filtering
A, Q = parameters.A, parameters.Q
for t in pbar:
if t+lag >= 0:
mean_precision = forward_messages[t+lag]['mean_precision']
precision = forward_messages[t+lag]['precision']
else:
mean_precision = forward_message['mean_precision']
precision = forward_message['precision']
mean_lag = np.linalg.solve(precision, mean_precision)
cov_lag = np.linalg.inv(precision)
# Forward Simulate
for _ in range(-lag):
mean_lag = np.dot(A, mean_lag)
cov_lag = np.dot(np.dot(A, cov_lag), A.T) + Q
mean[t] = mean_lag
cov[t] = cov_lag
return mean, cov
else:
# Fixed-lag Smoothing
for t in pbar:
# Backward Messages
back_obs = observations[t:min(t+lag, L)]
                fixed_lag_message = self._backward_message(
observations=back_obs,
parameters=parameters,
backward_message=backward_message,
)
# Output
mean_precision = \
forward_messages[t]['mean_precision'] + \
fixed_lag_message['mean_precision']
precision = \
forward_messages[t]['precision'] + \
fixed_lag_message['precision']
mean[t] = np.linalg.solve(precision, mean_precision)
cov[t] = np.linalg.inv(precision)
return mean, cov
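    # Illustrative note (added): lag=None returns the Kalman smoother moments
    # E[x_t | y_{1:T}] and Cov[x_t | y_{1:T}]; lag=0 gives the filter and
    # lag=-s the s-step-ahead predictive, e.g.
    #   mean, cov = helper.latent_var_distr(y, parameters, lag=0)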
def latent_var_sample(self, observations, parameters,
forward_message=None, backward_message=None,
distr='joint', lag=None, num_samples=None,
tqdm=None, include_init=False, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
if distr == 'joint' and lag is not None:
raise ValueError("Must set distr to 'marginal' for lag != None")
A, LQinv = parameters.A, parameters.LQinv
AtQinv = np.dot(A.T, np.dot(LQinv, LQinv.T))
AtQinvA = np.dot(AtQinv, A)
if distr == 'joint':
# Forward Pass
forward_messages = self.forward_pass(
observations=observations,
parameters=parameters,
forward_message=forward_message,
include_init_message=include_init,
tqdm=tqdm
)
L = len(forward_messages)
if num_samples is not None:
x = np.zeros((L, self.n, num_samples))
else:
x = np.zeros((L, self.n))
# Backward Sampler
x_cov = np.linalg.inv(forward_messages[-1]['precision'])
x_mean = np.dot(x_cov, forward_messages[-1]['mean_precision'])
x[-1] = np.random.multivariate_normal(mean=x_mean, cov=x_cov,
size=num_samples).T
pbar = reversed(range(L-1))
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description("backward smoothed sampling x")
for t in pbar:
x_next = x[t+1]
x_cov = np.linalg.inv(forward_messages[t]['precision'] +
AtQinvA)
if num_samples is None:
x_mean = np.dot(x_cov,
forward_messages[t]['mean_precision'] + \
np.dot(AtQinv, x_next))
else:
x_mean = np.dot(x_cov,
np.outer(forward_messages[t]['mean_precision'],
np.ones(num_samples)) + \
np.dot(AtQinv, x_next))
x[t] = x_mean + np.random.multivariate_normal(
mean=np.zeros(self.n), cov=x_cov, size=num_samples,
).T
return x
elif distr == 'marginal':
# Calculate Distribution
x_mean, x_cov = self.latent_var_distr(observations, parameters,
lag=lag, forward_message=forward_message,
backward_message=backward_message, tqdm=tqdm,
)
# Sample from Distribution
L = x_mean.shape[0]
if num_samples is not None:
x = np.zeros((x_mean.shape[0], self.n, num_samples))
else:
x = np.zeros((x_mean.shape[0], self.n))
pbar = reversed(range(L))
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description("sampling x")
            for t in pbar:
                noise = np.random.multivariate_normal(
                    mean=np.zeros(self.n), cov=x_cov[t], size=num_samples,
                ).T
                if num_samples is not None:
                    # broadcast the (n,) mean across the sample axis
                    x[t] = x_mean[t][:, np.newaxis] + noise
                else:
                    x[t] = x_mean[t] + noise
            return x
        else:
            raise ValueError("Unrecognized `distr'; {0}".format(distr))
def latent_var_pairwise_marginal(self, observations, parameters,
forward_message=None, backward_message=None,
distribution='smoothed', tqdm=None):
""" Calculate latent var marginal distribution
Args:
observations (ndarray): num_obs by n observations
parameters (LGSSMParameters): parameters
forward_message (dict): alpha message
(e.g. Pr(x_{-1} | y_{-inf:-1}))
backward_message (dict): beta message
(e.g. Pr(y_{T:inf} | x_{T-1}))
'likelihood_vector' (ndarray) dimension num_states
'y_next' (ndarray) dimension p by m, optional
distr (string): 'smoothed', 'filtered', 'predict'
smoothed: Pr(X | Y, theta)
filtered: Pr(X_t | Y_<=t, theta)
predictive:Pr(X_t | Y_<t, theta)
Returns
mean (ndarray): num_obs by 2n, pairwise mean (x_t, x_{t+1})
cov (ndarray): num_obs by 2n by 2n, pairwise covariance
"""
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
num_obs = np.shape(observations)[0]
mean = np.zeros((num_obs, 2*self.n))
cov = np.zeros((num_obs, 2*self.n, 2*self.n))
if distribution == 'smoothed':
forward_messages = self.forward_pass(
observations=observations,
parameters=parameters,
forward_message=forward_message,
include_init_message=True,
tqdm=tqdm
)
backward_messages = self.backward_pass(
observations=observations,
parameters=parameters,
backward_message=backward_message,
include_init_message=True,
tqdm=tqdm
)
# Helper Constants
C = parameters.C
Rinv = parameters.Rinv
RinvC = np.dot(Rinv, C)
CtRinvC = np.dot(C.T, RinvC)
A = parameters.A
Qinv = parameters.Qinv
QinvA = np.dot(Qinv, A)
AtQinvA = np.dot(A.T, QinvA)
for t in range(num_obs):
y_t = observations[t]
mean_precision = \
np.concatenate([
forward_messages[t]['mean_precision'],
backward_messages[t+1]['mean_precision'] + \
np.dot(RinvC.T, y_t)
])
precision = \
np.block([
[forward_messages[t]['precision'] + AtQinvA, -QinvA.T],
[-QinvA,
backward_messages[t+1]['precision'] + CtRinvC + Qinv]
])
mean[t] = np.linalg.solve(precision, mean_precision)
cov[t] = np.linalg.inv(precision)
return mean, cov
elif distribution == 'filtered':
raise NotImplementedError()
elif distribution == 'predictive':
raise NotImplementedError()
else:
raise ValueError("Invalid `distribution'; {0}".format(distribution))
return
def y_distr(self, observations, parameters,
distr='marginal', lag=None,
forward_message=None, backward_message=None,
latent_vars=None, tqdm=None):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
        C, R = parameters.C, parameters.R
        if latent_vars is None:
            # no latent draws supplied: push the analytic latent moments
            # through the emission y_t = C x_t + noise
            x_mean, x_cov = self.latent_var_distr(
                observations=observations,
                parameters=parameters,
                distr=distr,
                lag=lag,
                forward_message=forward_message,
                backward_message=backward_message,
                tqdm=tqdm,
            )
            y_mean = x_mean.dot(C.T)
            y_cov = np.array([C.dot(x_cov_t).dot(C.T) + R
                for x_cov_t in x_cov])
            return y_mean, y_cov
        else:
            L = np.shape(latent_vars)[0]
            if lag is None or lag >= 0:
                y_mean = latent_vars.dot(C.T)
                y_cov = np.array([R for _ in range(L)])
                return y_mean, y_cov
            else:
                y_mean = np.full((L, self.m), np.nan)
                y_cov = np.full((L, self.m, self.m), np.nan)
                A, Q = parameters.A, parameters.Q
                # Apply the transition + noise for -lag steps
                tran = np.eye(self.n)
                cov = np.zeros((self.n, self.n))
                for _ in range(-lag):
                    tran = A.dot(tran)
                    cov = A.dot(cov).dot(A.T) + Q
                tran = C.dot(tran)
                cov = C.dot(cov).dot(C.T) + R
                # Calculate mean + cov based on latent_vars[t+lag]
                y_mean[-lag:] = latent_vars[:lag].dot(tran.T)
                y_cov[-lag:] = np.array([cov for _ in range(L+lag)])
                # Calculate mean + cov for the first -lag steps from the
                # forward message
                default_mean = np.linalg.solve(forward_message['precision'],
                        forward_message['mean_precision'])
                default_cov = np.linalg.inv(forward_message['precision'])
                for _ in range(-lag):
                    default_cov = A.dot(default_cov).dot(A.T) + Q
                default_cov = C.dot(default_cov).dot(C.T) + R
                y_mean[:-lag] = np.array([default_mean.dot(tran.T)])
                y_cov[:-lag] = np.array([default_cov for _ in range(-lag)])
                return y_mean, y_cov
    def y_sample(self, observations, parameters,
            distr='marginal', lag=None, num_samples=None,
            forward_message=None, backward_message=None,
            latent_vars=None, tqdm=None):
        if forward_message is None:
            forward_message = self.default_forward_message
        if backward_message is None:
            backward_message = self.default_backward_message
        if latent_vars is None:
            latent_vars = self.latent_var_sample(
                observations=observations,
                parameters=parameters,
                distr=distr,
                lag=lag,
                num_samples=num_samples,
                forward_message=forward_message,
                backward_message=backward_message,
                tqdm=tqdm,
            )
        L = np.shape(latent_vars)[0]
        C, R = parameters.C, parameters.R
        if num_samples is not None:
            # latent_vars has shape (L, n, num_samples)
            y = np.zeros((L, self.m, num_samples))
            for s in range(num_samples):
                y[:, :, s] = latent_vars[:, :, s].dot(C.T) + \
                    np.random.multivariate_normal(
                        mean=np.zeros(self.m), cov=R, size=L)
        else:
            y = latent_vars.dot(C.T) + np.random.multivariate_normal(
                mean=np.zeros(self.m), cov=R, size=L)
        return y
def simulate_distr(self, T, parameters, include_init=True,
init_message=None, tqdm=None):
if init_message is None:
init_message = self.default_forward_message
m, n = np.shape(parameters.C)
A = parameters.A
C = parameters.C
Q = parameters.Q
R = parameters.R
# Outputs
latent_vars_mean = np.zeros((T+1, n), dtype=float)
latent_vars_cov = np.zeros((T+1, n, n), dtype=float)
obs_mean = np.zeros((T+1, m), dtype=float)
obs_cov = np.zeros((T+1, m, m), dtype=float)
# Init
latent_vars_mean[0] = np.linalg.solve(init_message['precision'],
init_message['mean_precision'])
latent_vars_cov[0] = np.linalg.inv(init_message['precision'])
obs_mean[0] = np.dot(C, latent_vars_mean[0])
obs_cov[0] = np.dot(C, latent_vars_cov[0]).dot(C.T) + R
pbar = range(1,T+1)
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description("simulating data")
for t in pbar:
latent_vars_mean[t] = np.dot(A, latent_vars_mean[t-1])
latent_vars_cov[t] = np.dot(A, latent_vars_cov[t-1]).dot(A.T) + Q
obs_mean[t] = np.dot(C, latent_vars_mean[t])
obs_cov[t] = np.dot(C, latent_vars_cov[t]).dot(C.T) + R
if include_init:
return dict(
obs_mean=obs_mean,
obs_cov=obs_cov,
latent_vars_mean=latent_vars_mean,
latent_vars_cov=latent_vars_cov,
)
else:
return dict(
obs_mean=obs_mean[1:],
obs_cov=obs_cov[1:],
latent_vars_mean=latent_vars_mean[1:],
latent_vars_cov=latent_vars_cov[1:],
)
def simulate(self, T, parameters, init_message=None, num_samples=None,
include_init=True, tqdm=None):
if init_message is None:
init_message = self.default_forward_message
m, n = np.shape(parameters.C)
A = parameters.A
C = parameters.C
Q = parameters.Q
R = parameters.R
# Outputs
if num_samples is not None:
latent_vars = np.zeros((T+1, num_samples, n), dtype=float)
obs_vars = np.zeros((T+1, num_samples, m), dtype=float)
else:
latent_vars = np.zeros((T+1, n), dtype=float)
obs_vars = np.zeros((T+1, m), dtype=float)
# Init
latent_vars[0] = np.random.multivariate_normal(
mean=np.linalg.solve(init_message['precision'],
init_message['mean_precision']),
cov=np.linalg.inv(init_message['precision']),
size=num_samples,
)
obs_vars[0] = np.dot(latent_vars[0], C.T) + \
np.random.multivariate_normal(
mean=np.zeros(m), cov=R, size=num_samples)
pbar = range(1,T+1)
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
pbar.set_description("simulating data")
for t in pbar:
latent_vars[t] = np.dot(latent_vars[t-1], A.T) + \
np.random.multivariate_normal(
mean=np.zeros(n), cov=Q, size=num_samples)
obs_vars[t] = np.dot(latent_vars[t], C.T) + \
np.random.multivariate_normal(
mean=np.zeros(m), cov=R, size=num_samples)
if num_samples is not None:
obs_vars = np.swapaxes(obs_vars, 1, 2)
latent_vars = np.swapaxes(latent_vars, 1, 2)
if include_init:
return dict(
observations=obs_vars,
latent_vars=latent_vars,
)
else:
return dict(
observations=obs_vars[1:],
latent_vars=latent_vars[1:],
)
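    # Illustrative sketch (comments only, added for exposition; nothing here
    # executes). `simulate_distr` propagates exact moments while `simulate`
    # draws trajectories, so for a `helper` instance of this class one would
    # expect, up to Monte Carlo error:
    #     distr = helper.simulate_distr(T, parameters)
    #     draws = helper.simulate(T, parameters, num_samples=10000)
    #     # draws['latent_vars'][t].mean(axis=-1) ~= distr['latent_vars_mean'][t]
    #     # np.cov(draws['latent_vars'][t]) ~= distr['latent_vars_cov'][t]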
## PF Functions
def pf_loglikelihood_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
        prior_mean = np.linalg.solve(forward_message['precision'],
                forward_message['mean_precision'])
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=gaussian_sufficient_statistics,
statistic_dim=self.n+2*self.n**2,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
**kwargs
)
loglikelihood = out['loglikelihood_estimate']
return loglikelihood
def pf_predictive_loglikelihood_estimate(self, observations, parameters,
num_steps_ahead=1, subsequence_start=0, subsequence_end=None,
weights=None,
pf="filter", N=1000, kernel=None, forward_message=None,
**kwargs):
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
        prior_mean = np.linalg.solve(forward_message['precision'],
                forward_message['mean_precision'])
from functools import partial
additive_statistic_func = partial(gaussian_predictive_loglikelihood,
num_steps_ahead=num_steps_ahead,
observations=observations,
)
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=additive_statistic_func,
statistic_dim=num_steps_ahead+1,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
logsumexp=True,
**kwargs
)
predictive_loglikelihood = out['statistics']
predictive_loglikelihood[0] = out['loglikelihood_estimate']
return predictive_loglikelihood
def pf_gradient_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
        prior_mean = np.linalg.solve(forward_message['precision'],
                forward_message['mean_precision'])
# Run buffered pf
complete_grad_dim = 2*self.n**2+self.n*self.m+self.m**2
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=lgssm_complete_data_loglike_gradient,
statistic_dim=complete_grad_dim,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
**kwargs
)
grad_estimate = average_statistic(out)
if self.n*self.m > 1:
grad = dict(
LRinv_vec = grad_estimate[:(self.m+1)*self.m//2],
LQinv_vec = grad_estimate[(self.m+1)*self.m//2:
(self.m+1)*self.m//2+(self.n+1)*self.n//2],
C = np.reshape(grad_estimate[
(self.m+1)*self.m//2+(self.n+1)*self.n//2:
(self.m+1)*self.m//2+(self.n+1)*self.n//2+self.n*self.m],
(self.m, self.n),
),
A = np.reshape(grad_estimate[
(self.m+1)*self.m//2+(self.n+1)*self.n//2+self.n*self.m:],
(self.n, self.n),
),
)
else:
grad = dict(
LRinv_vec = grad_estimate[0],
LQinv_vec = grad_estimate[1],
C = grad_estimate[2],
A = grad_estimate[3],
)
return grad
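    # Illustrative note (added for exposition): the flat `grad_estimate`
    # vector unpacked above is laid out as
    #     [ LRinv_vec : m(m+1)/2 entries (lower triangle of LRinv),
    #       LQinv_vec : n(n+1)/2 entries (lower triangle of LQinv),
    #       C         : m*n entries (row-major),
    #       A         : n*n entries (row-major) ]
    # For example, with m = n = 2 the vector has 3 + 3 + 4 + 4 = 14 entries,
    # and np.tril_indices(2) yields the (row, col) pairs (0,0), (1,0), (1,1)
    # used to vectorize the Cholesky factors.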
def pf_latent_var_distr(self, observations, parameters, lag=None,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
if lag == 0 and pf != 'filter':
raise ValueError("pf must be filter for lag = 0")
elif lag is None and pf == 'filter':
raise ValueError("pf must not be filter for smoothing")
elif lag is not None and lag != 0:
raise NotImplementedError("lag can only be None or 0")
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
        prior_mean = np.linalg.solve(forward_message['precision'],
                forward_message['mean_precision'])
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=gaussian_sufficient_statistics,
statistic_dim=self.n+2*self.n**2,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
elementwise_statistic=True,
**kwargs
)
avg_statistic = average_statistic(out)
if self.n > 1:
avg_statistic = np.reshape(avg_statistic, (-1, self.n+2*self.n**2))
x_mean = avg_statistic[:, 0:self.n]
x_cov = np.reshape(
avg_statistic[:, self.n:self.n+self.n**2],
(-1, self.n, self.n),
) - np.einsum('ij,ik->ijk', x_mean, x_mean)
else:
avg_statistic = np.reshape(avg_statistic, (-1, 3))
x_mean = avg_statistic[:, 0]
x_cov = avg_statistic[:, 1] - x_mean**2
x_mean = np.reshape(x_mean, (x_mean.shape[0], 1))
x_cov = np.reshape(x_cov, (x_cov.shape[0], 1, 1))
return x_mean, x_cov
def _get_kernel(self, kernel):
if kernel is None:
if self.n*self.m == 1:
kernel = 'optimal'
else:
kernel = 'highdim'
if kernel == "prior":
Kernel = LGSSMPriorKernel()
elif kernel == "optimal":
Kernel = LGSSMOptimalKernel()
elif kernel == "highdim":
Kernel = LGSSMHighDimOptimalKernel()
else:
raise ValueError("Unrecognized kernel = {0}".format(kernel))
return Kernel
# Additive Statistics
def lgssm_complete_data_loglike_gradient(x_t, x_next, y_next, parameters,
**kwargs):
""" Gradient of Complete Data Log-Likelihood
Gradient w/r.t. parameters of log Pr(y_{t+1}, x_{t+1} | x_t, parameters)
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
y_next (m ndarray): y_{t+1}
parameters (Parameters): parameters
Returns:
grad_complete_data_loglike (N by p ndarray):
gradient of complete data loglikelihood for particles
[ grad_LRinv_vec, grad_LQinv_vec, grad_C, grad_A ]
"""
N, n = np.shape(x_next)
m = np.shape(y_next)[0]
A = parameters.A
LQinv = parameters.LQinv
Qinv = parameters.Qinv
C = parameters.C
LRinv = parameters.LRinv
Rinv = parameters.Rinv
grad_complete_data_loglike = [None] * N
if (n != 1) or (m != 1):
LQinv_Tinv = np.linalg.inv(LQinv).T
LRinv_Tinv = np.linalg.inv(LRinv).T
for i in range(N):
grad = {}
diff = x_next[i] - np.dot(A, x_t[i])
grad['A'] = np.outer(
np.dot(Qinv, diff), x_t[i])
grad['LQinv_vec'] = (
LQinv_Tinv + -1.0*np.dot(np.outer(diff, diff), LQinv)
)[np.tril_indices(n)]
diff = y_next - np.dot(C, x_next[i])
grad['C'] = np.outer(np.dot(Rinv, diff), x_next[i])
grad['LRinv_vec'] = (
LRinv_Tinv + -1.0*np.dot(np.outer(diff, diff), LRinv)
)[np.tril_indices(m)]
grad_complete_data_loglike[i] = np.concatenate([
grad['LRinv_vec'].flatten(),
grad['LQinv_vec'].flatten(),
grad['C'].flatten(),
grad['A'].flatten(),
])
grad_complete_data_loglike = np.array(grad_complete_data_loglike)
else:
diff_x = x_next - A * x_t
grad_A = Qinv * diff_x * x_t
grad_LQinv = (LQinv**-1) - (diff_x**2) * LQinv
diff_y = y_next - C * x_next
grad_C = Rinv * diff_y * x_next
grad_LRinv = (LRinv**-1) - (diff_y**2) * LRinv
grad_complete_data_loglike = np.hstack([
grad_LRinv, grad_LQinv, grad_C, grad_A])
return grad_complete_data_loglike
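# Illustrative sketch (added for exposition, not part of the original module):
# a finite-difference check of the scalar-case gradient formulas above. For a
# single transition, the complete-data loglikelihood term is
# log N(x_next | A x_t, Q), whose gradient w.r.t. A is
# Qinv * (x_next - A * x_t) * x_t, matching `grad_A` in the scalar branch.
if __name__ == "__main__":
    A, Q = 0.9, 0.5
    x_t, x_next = 1.3, 1.1
    def transition_loglike(A_):
        return -0.5*np.log(2*np.pi*Q) - 0.5*(x_next - A_*x_t)**2/Q
    eps = 1e-6
    grad_fd = (transition_loglike(A+eps) - transition_loglike(A-eps))/(2*eps)
    grad_analytic = (1.0/Q)*(x_next - A*x_t)*x_t
    assert abs(grad_fd - grad_analytic) < 1e-8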
def gaussian_predictive_loglikelihood(x_t, x_next, t, num_steps_ahead,
parameters, observations,
**kwargs):
""" Predictive Log-Likelihood
Calculate [Pr(y_{t+1+k} | x_{t+1} for k in [0,..., num_steps_ahead]]
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
num_steps_ahead
parameters (Parameters): parameters
observations (T by m ndarray): y
Returns:
predictive_loglikelihood (N by num_steps_ahead+1 ndarray)
"""
N, n = np.shape(x_next)
T, m = np.shape(observations)
predictive_loglikelihood = np.zeros((N, num_steps_ahead+1))
x_pred_mean = x_next + 0.0
x_pred_cov = np.zeros((n, n))
R, Q = parameters.R, parameters.Q
for k in range(num_steps_ahead+1):
if t+k >= T:
break
diff = (
np.outer(np.ones(N), observations[t+k]) - \
np.dot(x_pred_mean, parameters.C.T)
)
y_pred_cov = R + np.dot(parameters.C,
np.dot(x_pred_cov, parameters.C.T))
if m > 1:
pred_loglike = (
-0.5*np.sum(diff*np.linalg.solve(y_pred_cov, diff.T).T, axis=1)+\
-0.5*m*np.log(2.0*np.pi) +\
-0.5*np.linalg.slogdet(y_pred_cov)[1]
)
else:
pred_loglike = -0.5*diff**2/y_pred_cov + \
-0.5*np.log(2.0*np.pi) - 0.5*np.log(y_pred_cov)
pred_loglike = pred_loglike[:,0]
predictive_loglikelihood[:,k] = pred_loglike
x_pred_mean = np.dot(x_pred_mean, parameters.A.T)
x_pred_cov = Q + \
np.dot(parameters.A,
np.dot(x_pred_cov, parameters.A.T))
return predictive_loglikelihood
def gaussian_sufficient_statistics(x_t, x_next, y_next, **kwargs):
""" Gaussian Sufficient Statistics
h[0] = sum(x_{t+1})
h[1] = sum(x_{t+1} x_{t+1}^T)
        h[2] = sum(x_t x_{t+1}^T)
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
y_next (m ndarray): y_{t+1}
Returns:
h (N by p ndarray): sufficient statistic
"""
N = np.shape(x_t)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
h = [x_next,
np.einsum('ij,ik->ijk', x_next, x_next),
np.einsum('ij,ik->ijk', x_t, x_next),
]
h = np.hstack([np.reshape(h_, (N, -1)) for h_ in h])
else:
# n = 1, x is scalar
h = np.hstack([x_next, x_next**2, x_t*x_next])
return h
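# Illustrative sketch (added for exposition, not part of the original module):
# recovering posterior moments from averaged sufficient statistics, mirroring
# the unpacking in `pf_latent_var_distr` above for the scalar (n = 1) case.
if __name__ == "__main__":
    N = 1000
    x_t = np.random.randn(N, 1)
    x_next = 0.9*x_t + 0.1*np.random.randn(N, 1)
    h = gaussian_sufficient_statistics(x_t, x_next, y_next=None)
    stat = h.mean(axis=0)  # [E x_next, E x_next^2, E x_t x_next]
    x_mean = stat[0]
    x_var = stat[1] - x_mean**2
    print("mean: {0}, var: {1}".format(x_mean, x_var))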
| 52,767 | 37.629575 | 81 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/lgssm/parameters.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...base_parameters import (
BaseParameters, BasePrior, BasePreconditioner,
)
from ...variables import (
SquareMatrixParamHelper, SquareMatrixPriorHelper,
SquareMatrixPrecondHelper,
RectMatrixParamHelper, RectMatrixPriorHelper,
RectMatrixPrecondHelper,
CovarianceParamHelper, CovariancePriorHelper,
CovariancePrecondHelper,
)
from ..._utils import var_stationary_precision
class LGSSMParameters(BaseParameters):
""" LGSSM Parameters """
_param_helper_list = [
SquareMatrixParamHelper(name='A', dim_names=['n']),
RectMatrixParamHelper(name='C', dim_names=['m', 'n']),
CovarianceParamHelper(name='Q', dim_names=['n']),
CovarianceParamHelper(name='R', dim_names=['m']),
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "LGSSMParameters:"
my_str += "\nA:\n" + str(self.A)
my_str += "\nC:\n" + str(self.C)
my_str += "\nQ:\n" + str(self.Q)
my_str += "\nR:\n" + str(self.R)
return my_str
def project_parameters(self, **kwargs):
if 'C' not in kwargs:
kwargs['C'] = dict(fixed_eye=True)
return super().project_parameters(**kwargs)
class LGSSMPrior(BasePrior):
""" LGSSM Prior
See individual Prior Mixins for details
"""
_Parameters = LGSSMParameters
_prior_helper_list = [
CovariancePriorHelper(name='Q', dim_names=['n'], matrix_name='A'),
CovariancePriorHelper(name='R', dim_names=['m'], matrix_name='C'),
SquareMatrixPriorHelper(name='A', dim_names=['n'],
var_row_name='Q'),
RectMatrixPriorHelper(name='C', dim_names=['m', 'n'],
var_row_name='R'),
]
class LGSSMPreconditioner(BasePreconditioner):
""" LGSSM Preconditioner
See individual Precondition Mixins for details
"""
_precond_helper_list = [
SquareMatrixPrecondHelper(name='A', dim_names=['n'], var_row_name='Q'),
RectMatrixPrecondHelper(name='C', dim_names=['m', 'n'], var_row_name='R'),
CovariancePrecondHelper(name='Q', dim_names=['n']),
CovariancePrecondHelper(name='R', dim_names=['m']),
]
def generate_lgssm_data(T, parameters, initial_message = None,
tqdm=None):
""" Helper function for generating LGSSM time series
Args:
T (int): length of series
parameters (LGSSMParameters): parameters
        initial_message (dict): prior message for x_{-1}
Returns:
data (dict): dictionary containing:
observations (ndarray): T by m
latent_vars (ndarray): T by n
parameters (LGSSMParameters)
            initial_message (dict)
"""
m, n = np.shape(parameters.C)
A = parameters.A
C = parameters.C
Q = parameters.Q
R = parameters.R
if initial_message is None:
init_precision = var_stationary_precision(
parameters.Qinv, parameters.A, 10)
initial_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(n),
'precision': init_precision,
}
latent_vars = np.zeros((T, n), dtype=float)
obs_vars = np.zeros((T, m), dtype=float)
latent_prev = np.random.multivariate_normal(
mean=np.linalg.solve(initial_message['precision'],
initial_message['mean_precision']),
cov=np.linalg.inv(initial_message['precision']),
)
pbar = range(T)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("generating data")
for t in pbar:
latent_vars[t] = np.random.multivariate_normal(
mean=np.dot(A, latent_prev),
cov=Q,
)
obs_vars[t] = np.random.multivariate_normal(
mean=np.dot(C, latent_vars[t]),
cov=R,
)
latent_prev = latent_vars[t]
data = dict(
observations=obs_vars,
latent_vars=latent_vars,
parameters=parameters,
initial_message=initial_message,
)
return data
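# Illustrative sketch (added for exposition, not part of the original module):
# the generative recursion that `generate_lgssm_data` implements, written out
# in plain numpy for a 1-dimensional state and observation:
#     x_t = A x_{t-1} + w_t,  w_t ~ N(0, Q)
#     y_t = C x_t + v_t,      v_t ~ N(0, R)
if __name__ == "__main__":
    T, A, C, Q, R = 100, 0.95, 1.0, 0.1, 0.5
    x, y = np.zeros(T), np.zeros(T)
    x_prev = 0.0
    for t in range(T):
        x[t] = A*x_prev + np.sqrt(Q)*np.random.randn()
        y[t] = C*x[t] + np.sqrt(R)*np.random.randn()
        x_prev = x[t]
    print(y[:5])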
| 4,433 | 32.590909 | 86 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/lgssm/__init__.py | from .parameters import (
LGSSMParameters,
LGSSMPrior,
LGSSMPreconditioner,
generate_lgssm_data,
)
from .helper import LGSSMHelper
from .kernels import (
LGSSMPriorKernel,
LGSSMOptimalKernel,
LGSSMHighDimOptimalKernel,
)
from .sampler import LGSSMSampler, SeqLGSSMSampler
| 337 | 21.533333 | 50 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/slds/sampler.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCSampler, SeqSGMCMCSampler
from .parameters import SLDSPrior, SLDSParameters, SLDSPreconditioner
from .helper import SLDSHelper
class SLDSSampler(SGMCMCSampler):
def __init__(self, num_states, n, m,
observations=None, prior=None, parameters=None,
forward_message=None, backward_message=None,
name="SLDSSampler", **kwargs):
self.options = kwargs
self.num_states = num_states
self.n = n
self.m = m
self.name = name
self.setup(
observations=observations,
prior=prior,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
)
return
def setup(self, observations, prior, parameters=None, forward_message=None,
backward_message=None):
""" Initialize the sampler
Args:
observations (ndarray): T by m ndarray of time series values
prior (SLDSPrior): prior
forward_message (ndarray): prior probability for latent state
parameters (SLDSParameters): initial parameters
(optional, will sample from prior by default)
"""
self.observations = observations
if prior is None:
prior = SLDSPrior.generate_default_prior(
num_states=self.num_states, n=self.n, m=self.m,
)
self.prior = prior
if parameters is None:
self.parameters = self.prior.sample_prior()
else:
if not isinstance(parameters, SLDSParameters):
raise ValueError("parameters is not a SLDSParameter")
self.parameters = parameters
if forward_message is None:
forward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
},
'z': {
'log_constant': 0.0,
'prob_vector': np.ones(self.num_states)/self.num_states,
},
}
self.forward_message = forward_message
if backward_message is None:
backward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
},
'z': {
'log_constant': np.log(self.num_states),
'likelihood_vector':
np.ones(self.num_states)/self.num_states,
},
}
self.backward_message = backward_message
self.message_helper=SLDSHelper(
num_states=self.num_states,
n=self.n,
m=self.m,
forward_message=forward_message,
backward_message=backward_message,
)
return
def _check_observation_shape(self, observations):
if observations is None:
return
if np.shape(observations)[1] != self.m:
raise ValueError("observations second dimension does not match m")
return
def _get_preconditioner(self, preconditioner=None):
if preconditioner is None:
preconditioner = SLDSPreconditioner()
return preconditioner
def init_parameters_from_x_and_z(self, x, z):
""" Get initial parameters for the sampler
Args:
x (ndarray): latent var
z (ndarray): latent var
Return:
init_parameters (SLDSParameters): init_parameters
"""
# Check z is appropriate size
if np.shape(z)[0] != self._get_T():
raise ValueError("z must be length T = {0}".format(self._get_T()))
if not np.issubdtype(z.dtype, np.integer):
raise ValueError("z must be integers, not {0}".format(z.dtype))
if np.max(z) >= self.num_states or np.min(z) < 0:
raise ValueError("z must be in (0, \ldots, {0}-1)".format(
self.num_states))
# Check x is appropriate size
if np.shape(x)[0] != self._get_T() or np.shape(x)[1] != self.n:
raise ValueError("x must be size {0} not {1}".format(
(self._get_T(), self.n), np.shape(x)))
# Init on Gibb Step
init_parameters = self.message_helper.parameters_gibbs_sample(
observations=self.observations,
latent_vars=dict(x=x, z=z),
forward_message=self.forward_message,
backward_message=self.backward_message,
prior=self.prior,
)
self.parameters = init_parameters
return init_parameters
def init_parameters_from_k_means(self, x=None, lags=[0,1], kmeans=None, **kwargs):
""" Get initial parameters for the sampler
Use KMeans on data (treating observations as independent)
Each point is concat(y[lag] for lag in lags)
Args:
x (ndarray): initialization of latent variables
default is to use observations
lags (list of indices): indices of lags to use for clustering
kmeans (sklearn model): e.g. sklearn.cluster.KMeans
**kwargs (dict): keyword args to pass to sklearn's kmean
"n_init" : int (default = 10)
"max_iter": int (default = 300)
"n_jobs" : int (default = 1)
See sklearn.cluster.KMeans for more
Returns:
init_parameters (SLDSParameters): init_parameters
"""
from sklearn.cluster import KMeans, MiniBatchKMeans
# Run KMeans
if kmeans is None:
if self._get_T() <= 10**6:
kmeans = KMeans(n_clusters = self.num_states, **kwargs)
else:
kmeans = MiniBatchKMeans(n_clusters = self.num_states, **kwargs)
X = self.observations.reshape((self._get_T(), -1))
X_lagged = np.hstack([
X[max(lags)-lag:X.shape[0]-lag] for lag in lags
])
z = kmeans.fit_predict(X=X_lagged)
if z.size < self._get_T():
z = np.concatenate([np.zeros(self._get_T()-z.size, dtype=int), z])
if x is None:
x = self.observations
# Calculate Initial Param from KMeans init
init_parameters = self.init_parameters_from_x_and_z(x=x, z=z)
return init_parameters
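    # Illustrative note (added for exposition): for lags = [0, 1] and
    # observations X of shape (T, m), the lagged feature matrix built above is
    #     X_lagged = np.hstack([X[1:], X[:-1]])   # shape (T-1, 2m)
    # so each row pairs y_t with y_{t-1} before clustering; the first
    # max(lags) labels are then zero-padded back to length T.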
def init_sample_latent(self, init_method=None, init_burnin=0,
parameters=None, observations=None, track_samples=True,
z_init=None):
""" Initialize latent variables
Args:
init_method (string)
'copy' - use observations as continuous latent variables
'filtered' - draw z_t, x_t conditional on z_<t, x_<t, y_<=t
'filteredZ' - draw z_t conditional on z_<t, y_<=t
'from_vector' - draw x conditional on given z
init_burnin (int): additional Gibbs sampling steps
z_init (ndarray): optional, for init_method == 'from_vector'
Returns:
latent_vars (dict):
x (ndarray)
z (ndarray)
"""
if observations is None:
observations = self.observations
if parameters is None:
parameters = self.parameters
if init_method is None:
# Set default init method
if self.n <= self.m:
init_method = 'copy'
if self.n > self.m:
init_method = 'filteredZ'
# Init Methods
if init_method == 'copy':
if self.n > self.m:
raise ValueError("Cannot use init_method = 'copy' since n > m")
z = self.sample_z(x=observations[:, 0:self.n],
parameters=parameters,
observations=observations,
track_samples=track_samples,
)
x = self.sample_x(z=z,
parameters=parameters,
observations=observations,
track_samples=track_samples,
)
elif init_method == 'filtered':
logger.warning("Executing <init_method == 'filtered'>")
logger.warning("Strongly recommend <init_method == 'filteredZ>")
x, z = self.message_helper.init_filter_naive(
y=observations,
parameters=parameters,
x_forward_message=self.forward_message['x'],
z_forward_message=self.forward_message['z'],
)
elif init_method == 'filteredZ':
z = self.message_helper.init_filter_z(
y=observations,
parameters=parameters,
x_forward_message=self.forward_message['x'],
z_forward_message=self.forward_message['z'],
)
x = self.sample_x(z=z,
parameters=parameters,
observations=observations,
track_samples=track_samples,
)
elif init_method == "from_vector":
if np.max(z_init) >= self.num_states:
raise ValueError("z_init contains more states than in model")
z = z_init.copy()
x = self.sample_x(z=z,
parameters=parameters,
observations=observations,
track_samples=track_samples,
)
else:
raise ValueError("Unrecognized init_method {0}".format(init_method))
for step in range(init_burnin):
z = self.sample_z(x=x,
parameters=parameters,
observations=observations,
track_samples=track_samples)
x = self.sample_x(z=z,
parameters=parameters,
observations=observations,
track_samples=track_samples,
)
return dict(x=x, z=z)
def sample_z(self, x=None, parameters=None, observations=None, tqdm=None,
track_samples=True):
""" Sample Z (given X)"""
if parameters is None:
parameters = self.parameters
if observations is None:
observations = self.observations
if x is None:
x = self.x
if np.shape(x)[0] != np.shape(observations)[0]:
raise ValueError("x and observations are different lengths")
if np.shape(x)[1] != self.n:
raise ValueError("x must be T by n ndarray")
z = self.message_helper._z_latent_var_sample(
observations=observations,
x=x,
parameters=parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
if track_samples:
self.z = z.copy()
return z
def sample_x(self, z=None, parameters=None, observations=None, tqdm=None,
track_samples=True):
""" Sample X (given Z)"""
if parameters is None:
parameters = self.parameters
if observations is None:
observations = self.observations
if z is None:
z = self.z
if np.shape(z)[0] != np.shape(observations)[0]:
raise ValueError("z and observations are different lengths")
if z.dtype != int:
raise ValueError("z must be ints")
x = self.message_helper._x_latent_var_sample(
observations=observations,
z=z,
parameters=parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
if track_samples:
self.x = x.copy()
return x
def sample_latent(self, x=None, z=None, num_rep=1, **kwargs):
""" Sample x, z for observations """
# Setup z and x
if x is None and z is None:
x, z = self.x, self.z
if z is None:
z = self.sample_z(x=x, **kwargs)
if x is None:
x = self.sample_x(z=z, **kwargs)
for rep in range(num_rep):
z = self.sample_z(x=x, **kwargs)
x = self.sample_x(z=z, **kwargs)
return dict(x=x, z=z)
def sample_gibbs(self, x=None, z=None, num_rep=1, **kwargs):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (LGSSMParameters): sampled parameters after one step
"""
latent_vars = self.sample_latent(x=x, z=z, num_rep=num_rep, **kwargs)
new_parameters = self.message_helper.parameters_gibbs_sample(
observations=self.observations,
latent_vars=latent_vars,
prior=self.prior,
)
self.parameters = new_parameters
return self.parameters
def noisy_loglikelihood(self, kind="complete",
subsequence_length=-1,
minibatch_size=1, buffer_length=10,
latent_draws=1, latent_thinning=5,
latent_burnin=5, latent_init=None,
**kwargs):
""" Approximation to loglikelihood (EM Lowerbound)
Args:
kind (string):
"complete" - logPr(Y, | theta, X, Z) (default)
"x_marginal" - logPr(Y | theta, X)
"z_marginal" - logPr(Y | theta, Z)
subsequence_length (int): length of subsequence used in evaluation
minibatch_size (int): number of subsequences
buffer_length (int): length of each subsequence buffer
latent_draws (int): number of latent variable Monte Carlo draws in
gradient approximation
latent_thinning (int): number of steps between samples
latent_burnin (int): number of burnin Gibb steps
latent_init (string): latent variable initialization method
See `self.init_sample_latent`
"""
        noisy_loglike = 0.0
for s in range(0, minibatch_size):
# Get Subsequence and Buffer
subsequence = self._random_subsequence_and_buffers(buffer_length,
subsequence_length)
buffer_ = self.observations[subsequence['left_buffer_start']:
subsequence['right_buffer_end']]
subsequence['buffer'] = buffer_
if latent_init == "from_vector":
z_init = kwargs['z_init'][subsequence['left_buffer_start']:
subsequence['right_buffer_end']]
else:
z_init = None
# Run Blocked Gibbs on x_buffer, z_buffer
# Init
latent_buffer = self.init_sample_latent(
init_method=latent_init,
init_burnin=latent_burnin,
observations=buffer_,
track_samples=False,
z_init=z_init,
)
for draw in range(latent_draws):
if draw > 0 and latent_thinning > 0:
# Thinning
latent_buffer = self.sample_latent(
x=latent_buffer['x'], z=latent_buffer['z'],
num_rep=latent_thinning,
observations=buffer_,
track_samples=False,
)
# Subsequence Objective Estimate
noisy_loglike += self._subsequence_objective(
subsequence=subsequence,
x_buffer=latent_buffer['x'],
z_buffer=latent_buffer['z'],
kind=kind)
# Average over Minibatch + Draws
noisy_loglike *= 1.0/(minibatch_size*latent_draws)
return noisy_loglike
def _subsequence_objective(self, subsequence, x_buffer, z_buffer,
kind="complete"):
# Loglikelihood Approximation Calculator
start = (subsequence['subsequence_start'] - \
subsequence['left_buffer_start'])
end = (subsequence['subsequence_end'] - \
subsequence['left_buffer_start'])
y = subsequence['buffer'][start:end]
x = x_buffer[start:end]
z = z_buffer[start:end]
if kind == "complete":
forward_message = {}
if start > 0:
forward_message['x_prev'] = x_buffer[start-1]
forward_message['z_prev'] = z_buffer[start-1]
loglikelihood = self.message_helper._complete_data_loglikelihood(
observations=y, x=x, z=z, parameters=self.parameters,
forward_message=forward_message,
backward_message=self.backward_message,
weights=subsequence['weights'],
)
elif kind == "x_marginal":
forward_message = (self
.message_helper
.forward_message(
observations=subsequence['buffer'][0:start],
x=x_buffer[0:start],
parameters=self.parameters,
forward_message=self.forward_message,
))
forward_message['log_constant'] = \
self.forward_message['z']['log_constant']
loglikelihood = self.message_helper._z_marginal_loglikelihood(
observations=y, x=x, parameters=self.parameters,
forward_message=forward_message,
backward_message=self.backward_message,
weights=subsequence['weights'],
)
elif kind == "z_marginal":
forward_message = (self
.message_helper
.forward_message(
observations=subsequence['buffer'][0:start],
z=z_buffer[0:start],
parameters=self.parameters,
forward_message=self.forward_message,
))
forward_message['log_constant'] = \
self.forward_message['x']['log_constant']
loglikelihood = self.message_helper._x_marginal_loglikelihood(
observations=y, z=z, parameters=self.parameters,
forward_message=forward_message,
backward_message=self.backward_message,
weights=subsequence['weights'],
)
else:
raise ValueError("Unrecognized kind = {0}".format(kind))
return loglikelihood
def noisy_gradient(self, kind="complete",
subsequence_length=-1, minibatch_size=1, buffer_length=0,
latent_draws=1, latent_thinning=5, latent_burnin=5,
latent_init=None, preconditioner=None, is_scaled=True,
**kwargs):
""" Noisy Gradient Estimate
Monte Carlo Estimate of gradient (using buffering)
Runs Gibbs on buffered sequence
Args:
kind (string): type of gradient
"complete" - grad logPr(Y, Xhat, Zhat | theta) (default)
"x_marginal" - grad logPr(Y, Xhat | theta)
"z_marginal" - grad logPr(Y, Zhat | theta)
minibatch_size (int): number of subsequences
buffer_length (int): length of each subsequence buffer
latent_draws (int): number of latent variable Monte Carlo draws in
gradient approximation
latent_thinning (int): number of steps between samples
latent_burnin (int): number of burnin Gibb steps
latent_init (string): latent variable initialization method
See `self.init_sample_latent`
preconditioner (object): preconditioner for gradients
is_scaled (boolean): scale gradient by 1/T
Returns:
noisy_gradient (ndarray): gradient vector
"""
noisy_grad_loglike = \
self._noisy_grad_loglikelihood(
subsequence_length=subsequence_length,
minibatch_size=minibatch_size,
buffer_length=buffer_length,
kind=kind, latent_draws=latent_draws,
latent_thinning=latent_thinning,
latent_burnin=latent_burnin, latent_init=latent_init,
**kwargs)
noisy_grad_prior = self.prior.grad_logprior(
parameters=self.parameters)
noisy_gradient = {var: noisy_grad_prior[var] + noisy_grad_loglike[var]
for var in noisy_grad_prior}
if preconditioner is None:
if is_scaled:
for var in noisy_gradient:
noisy_gradient[var] /= self._get_T()
else:
scale = 1.0/self._get_T() if is_scaled else 1.0
noisy_gradient = preconditioner.precondition(noisy_gradient,
parameters=self.parameters,
scale=scale)
return noisy_gradient
def _noisy_grad_loglikelihood(self, subsequence_length=-1,
minibatch_size=1, buffer_length=0, kind='complete',
latent_draws=1, latent_thinning=5, latent_burnin=5, latent_init=None,
**kwargs):
# Noisy Gradient
noisy_grad = {var: np.zeros_like(value)
for var, value in self.parameters.as_dict().items()}
for s in range(0, minibatch_size):
# Get Subsequence and Buffer
subsequence = self._random_subsequence_and_buffers(buffer_length,
subsequence_length=subsequence_length)
buffer_ = self.observations[subsequence['left_buffer_start']:
subsequence['right_buffer_end']]
subsequence['buffer'] = buffer_
if latent_init == "from_vector":
z_init = kwargs['z_init'][
subsequence['left_buffer_start']:\
subsequence['right_buffer_end']
]
else:
z_init = None
# Run Blocked Gibbs on x_buffer, z_buffer
# Init
latent_buffer = self.init_sample_latent(
init_method=latent_init,
init_burnin = latent_burnin,
observations=subsequence['buffer'],
track_samples=False,
z_init=z_init,
)
for draw in range(latent_draws):
if draw > 0 and latent_thinning > 0:
# Thinning
latent_buffer = self.sample_latent(
x=latent_buffer['x'],
z=latent_buffer['z'],
num_rep=latent_thinning,
observations=subsequence['buffer'],
track_samples=False,
)
                # Subsequence Gradient Estimate
                noisy_grad_add = self._subsequence_gradient(
                        subsequence=subsequence,
                        x_buffer=latent_buffer['x'],
                        z_buffer=latent_buffer['z'],
                        kind=kind,
                        )
                for var in noisy_grad:
                    noisy_grad[var] += noisy_grad_add[var]
        for var in noisy_grad:
            noisy_grad[var] *= 1.0 / (minibatch_size*latent_draws)
            if np.any(np.isnan(noisy_grad[var])):
                raise ValueError("NaNs in gradient of {0}".format(var))
            if np.linalg.norm(noisy_grad[var]) > 1e16:
                logger.warning("Norm of noisy_grad[{0}] > 1e16: {1}".format(
                    var, noisy_grad[var]))
        return noisy_grad
def _subsequence_gradient(self, subsequence, x_buffer, z_buffer, kind):
""" Forward + Backward Messages + Subsequence Gradient """
start = (subsequence['subsequence_start'] - \
subsequence['left_buffer_start'])
end = (subsequence['subsequence_end'] - \
subsequence['left_buffer_start'])
y = subsequence['buffer'][start:end]
x = x_buffer[start:end]
z = z_buffer[start:end]
if kind == "complete":
# Naive: grad log Pr(y, x, z | theta)
if start > 0:
forward_message = {
'x_prev': x_buffer[start-1],
'z_prev': z_buffer[start-1],
}
else:
forward_message = {}
if end < np.shape(subsequence['buffer'])[0]:
backward_message = {
'x_next': x_buffer[end],
'z_next': z_buffer[end],
}
else:
backward_message = {}
noisy_grad_loglike = (self
.message_helper
._gradient_complete_data_loglikelihood(
observations=y,
x=x, z=z,
parameters=self.parameters,
forward_message=forward_message,
backward_message=backward_message,
weights=subsequence['weights'],
))
elif kind == "x_marginal":
# X: grad log Pr(y, x | theta)
forward_message = (self
.message_helper
.forward_message(
observations=subsequence['buffer'][0:start],
x=x_buffer[0:start],
parameters=self.parameters,
forward_message=self.forward_message,
))
if end < np.shape(subsequence['buffer'])[0]:
backward_message = (self
.message_helper
.backward_message(
observations=subsequence['buffer'][end:],
x=x_buffer[end:],
parameters=self.parameters,
backward_message=self.backward_message,
))
else:
backward_message = self.backward_message
noisy_grad_loglike = (self
.message_helper
._z_gradient_marginal_loglikelihood(
observations=y, x=x,
parameters=self.parameters,
forward_message=forward_message,
backward_message=backward_message,
weights=subsequence['weights'],
))
elif kind == "z_marginal":
# Z: grad log Pr(y, z | theta)
forward_message = (self
.message_helper
.forward_message(
observations=subsequence['buffer'][0:start],
z=z_buffer[0:start],
parameters=self.parameters,
forward_message=self.forward_message,
))
if end < np.shape(subsequence['buffer'])[0]:
backward_message = (self
.message_helper
.backward_message(
observations=subsequence['buffer'][end:],
z=z_buffer[end:],
parameters=self.parameters,
backward_message=self.backward_message,
))
else:
backward_message = self.backward_message
noisy_grad_loglike = (self
.message_helper
._x_gradient_marginal_loglikelihood(
observations=y, z=z,
parameters=self.parameters,
forward_message=forward_message,
backward_message=backward_message,
weights=subsequence['weights'],
))
else:
raise ValueError("Unrecognized kind = {0}".format(kind))
return noisy_grad_loglike
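# Illustrative sketch (added for exposition, not part of the original module):
# the subsequence/buffer index bookkeeping that `noisy_loglikelihood` and
# `noisy_gradient` rely on. `_random_subsequence_and_buffers` itself lives in
# the SGMCMCSampler base class; only the four fields referenced in this file
# are reconstructed here.
if __name__ == "__main__":
    T, subsequence_length, buffer_length = 1000, 50, 10
    start = np.random.randint(0, T - subsequence_length + 1)
    subsequence = dict(
        subsequence_start=start,
        subsequence_end=start + subsequence_length,
        left_buffer_start=max(0, start - buffer_length),
        right_buffer_end=min(T, start + subsequence_length + buffer_length),
        )
    # Position of the subsequence inside its (possibly truncated) buffer:
    s = subsequence['subsequence_start'] - subsequence['left_buffer_start']
    e = subsequence['subsequence_end'] - subsequence['left_buffer_start']
    print(subsequence, s, e)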
| 29,068 | 39.206086 | 86 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/slds/helper.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCHelper
from ..._utils import random_categorical, lower_tri_mat_inv
class SLDSHelper(SGMCMCHelper):
""" LGSSM Helper
forward_message (dict) with keys
x (dict):
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
z (dict):
prob_vector (ndarray) dimension num_states
log_constant (double) log scaling const
x_prev (ndarray)
z_prev (ndarray)
backward_message (dict) with keys
x (dict):
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
z (dict):
likelihood_vector (ndarray) dimension num_states
log_constant (double) log scaling const
x_next (ndarray)
z_next (ndarray)
"""
def __init__(self, num_states, n, m,
forward_message=None, backward_message=None,
**kwargs):
self.num_states = num_states
self.n = n
self.m = m
if forward_message is None:
forward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
},
'z': {
'log_constant': 0.0,
'prob_vector': np.ones(self.num_states)/self.num_states,
},
}
self.default_forward_message=forward_message
if backward_message is None:
backward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
},
'z': {
'log_constant': np.log(self.num_states),
'likelihood_vector':
np.ones(self.num_states)/self.num_states,
},
}
self.default_backward_message=backward_message
return
def _forward_messages(self, observations, parameters, forward_message,
x=None, z=None, **kwargs):
if z is not None:
if x is not None:
raise ValueError("Either x or z can be conditioned on")
# Forward Messages conditioned on z
return self._x_forward_messages(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
elif x is not None:
# Forward Messages conditioned on z
return self._z_forward_messages(
observations=observations,
x=x,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
else:
raise ValueError("Requires x or z be passed to condition on")
def _backward_messages(self, observations, parameters, backward_message, x=None, z=None, **kwargs):
if z is not None:
if x is not None:
raise ValueError("Either x or z can be conditioned on")
# Forward Messages conditioned on z
return self._x_backward_messages(
observations=observations,
z=z,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
elif x is not None:
# Forward Messages conditioned on z
return self._z_backward_messages(
observations=observations,
x=x,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
else:
raise ValueError("Requires x or z be passed to condition on")
## Helper Functions conditioned on z
def _x_forward_messages(self, observations, z, parameters, forward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of forward messages Pr(x_{t} | y_{<=t}, z)
# y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
forward_messages = [None]*(num_obs+1)
forward_messages[0] = forward_message
mean_precision = forward_message['x']['mean_precision']
precision = forward_message['x']['precision']
log_constant = forward_message['x']['log_constant']
z_prev = forward_message.get('z_prev', None)
Pi = parameters.pi
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = range(num_obs)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("forward messages")
for t in pbar:
y_cur = observations[t]
z_cur = z[t]
weight_t = 1.0 if weights is None else weights[t]
# Calculate Predict Parameters
J = np.linalg.solve(AtQinvA[z_cur] + precision, AtQinv[z_cur])
pred_mean_precision = np.dot(J.T, mean_precision)
pred_precision = Qinv[z_cur] - np.dot(AtQinv[z_cur].T, J)
# Calculate Observation Parameters
y_mean = np.dot(C,
np.linalg.solve(pred_precision, pred_mean_precision))
y_precision = Rinv - np.dot(CtRinv.T,
np.linalg.solve(CtRinvC + pred_precision, CtRinv))
log_constant += weight_t * (
-0.5 * np.dot(y_cur-y_mean,
np.dot(y_precision, y_cur-y_mean)) + \
0.5 * np.linalg.slogdet(y_precision)[1] + \
-0.5 * self.m * np.log(2*np.pi)
)
if z_prev is not None:
log_constant += weight_t * np.log(Pi[z_prev, z_cur])
# Calculate Filtered Parameters
new_mean_precision = pred_mean_precision + np.dot(CtRinv, y_cur)
new_precision = pred_precision + CtRinvC
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
z_prev = z_cur
if not only_return_last:
forward_messages[t+1] = dict(
x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
},
z_prev=z_prev,
)
if only_return_last:
last_message = dict(
x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
},
z_prev=z_prev,
)
return last_message
else:
return forward_messages
def _x_backward_messages(self, observations, z, parameters, backward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of backward messages Pr(y_{>t} | x_t, z)
# y is num_obs x n matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
backward_messages = [None]*(num_obs+1)
backward_messages[-1] = backward_message
mean_precision = backward_message['x']['mean_precision']
precision = backward_message['x']['precision']
log_constant = backward_message['x']['log_constant']
z_next = backward_message.get('z_next', None)
Pi = parameters.pi
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = reversed(range(num_obs))
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("backward messages")
for t in pbar:
y_cur = observations[t]
z_cur = z[t]
weight_t = 1.0 if weights is None else weights[t]
# Helper Values
xi = Qinv[z_cur] + precision + CtRinvC
L = np.linalg.solve(xi, AtQinv[z_cur].T)
vi = mean_precision + np.dot(CtRinv, y_cur)
# Calculate new parameters
log_constant += weight_t * (
-0.5 * self.m * np.log(2.0*np.pi) + \
np.sum(np.log(np.diag(LRinv))) + \
np.sum(np.log(np.diag(LQinv[z_cur]))) + \
-0.5 * np.linalg.slogdet(xi)[1] + \
-0.5 * np.dot(y_cur, np.dot(Rinv, y_cur)) + \
0.5 * np.dot(vi, np.linalg.solve(xi, vi))
)
if z_next is not None:
log_constant += weight_t * np.log(Pi[z_cur, z_next])
new_mean_precision = np.dot(L.T, vi)
new_precision = AtQinvA[z_cur] - np.dot(AtQinv[z_cur], L)
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
z_next = z_cur
if not only_return_last:
backward_messages[t] = dict(x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}, z_next=z_next)
if only_return_last:
last_message = dict(x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}, z_next=z_next)
return last_message
else:
return backward_messages
def _x_marginal_loglikelihood(self, observations, z, parameters,
forward_message=None, backward_message=None, weights=None,
**kwargs):
# Run forward pass + combine with backward pass
# y is num_obs x m matrix
# forward_pass is Pr(x_{T-1} | y_{<=T-1})
forward_pass = self._forward_message(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
weights=weights,
**kwargs)
weight_T = 1.0 if weights is None else weights[-1]
# Calculate the marginal loglikelihood of forward + backward message
f_mean_precision = forward_pass['x']['mean_precision']
f_precision = forward_pass['x']['precision']
c_mean_precision = f_mean_precision + backward_message['x']['mean_precision']
c_precision = f_precision + backward_message['x']['precision']
loglikelihood = forward_pass['x']['log_constant'] + \
(backward_message['x']['log_constant'] + \
+0.5 * np.linalg.slogdet(f_precision)[1] + \
-0.5 * np.linalg.slogdet(c_precision)[1] + \
-0.5 * np.dot(f_mean_precision,
np.linalg.solve(f_precision, f_mean_precision)
) + \
0.5 * np.dot(c_mean_precision,
np.linalg.solve(c_precision, c_mean_precision)
)
) * weight_T
z_next = backward_message.get('z_next')
z_prev = forward_pass.get('z_prev')
if (z_next is not None) and (z_prev is not None):
loglikelihood = loglikelihood + weight_T * np.log(
parameters.pi[z_prev, z_next])
return loglikelihood
def _x_gradient_marginal_loglikelihood(self, observations, z, parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None):
Pi, expanded_pi = parameters.pi, parameters.expanded_pi
A, LQinv, C, LRinv = \
parameters.A, parameters.LQinv, parameters.C, parameters.LRinv
# Forward Pass
# forward_messages = [Pr(x_{t} | z, y_{-inf:t}), y{t}] for t=-1,...,T-1
forward_messages = self.forward_pass(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
include_init_message=True)
# Backward Pass
# backward_messages = [Pr(y_{t+1:inf} | z,x_{t}), y{t}] for t=-1,...,T-1
backward_messages = self.backward_pass(
observations=observations,
z=z,
parameters=parameters,
backward_message=backward_message,
include_init_message=True)
# Gradients
grad = {var: np.zeros_like(value)
for var, value in parameters.as_dict().items()}
grad['LQinv'] = np.zeros_like(parameters.LQinv)
grad['LRinv'] = np.zeros_like(parameters.LRinv)
# Helper Constants
Rinv = np.dot(LRinv, LRinv.T)
RinvC = np.dot(Rinv, C)
CtRinvC = np.dot(C.T, RinvC)
LRinv_diaginv = np.diag(np.diag(LRinv)**-1)
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
QinvA = np.array([np.dot(Qinv_k, A_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(A_k.T, QinvA_k)
for (A_k, QinvA_k) in zip(A, QinvA)])
LQinv_diaginv = np.array([np.diag(np.diag(LQinv_k)**-1)
for LQinv_k in LQinv])
# Emission Gradients
        pbar = zip(forward_messages[1:], backward_messages[1:], observations)
        if tqdm is not None:
            pbar = tqdm(pbar)
            pbar.set_description("emission gradient loglike")
        for t, (forward_t, backward_t, y_t) in enumerate(pbar):
weight_t = 1.0 if weights is None else weights[t]
# Pr(x_t | y)
c_mean_precision = \
forward_t['x']['mean_precision'] + \
backward_t['x']['mean_precision']
c_precision = \
forward_t['x']['precision'] + backward_t['x']['precision']
x_mean = np.linalg.solve(c_precision, c_mean_precision)
xxt_mean = np.linalg.inv(c_precision) + np.outer(x_mean, x_mean)
# Gradient of C
grad['C'] += weight_t * (np.outer(np.dot(Rinv, y_t), x_mean) + \
-1.0 * np.dot(RinvC, xxt_mean))
# Gradient of LRinv
#raise NotImplementedError("SHOULD CHECK THE MATH FOR LRINV")
Cxyt = np.outer(np.dot(C, x_mean), y_t)
CxxtCt = np.dot(C, np.dot(xxt_mean, C.T))
grad['LRinv'] += weight_t * (LRinv_diaginv + \
-1.0*np.dot(np.outer(y_t, y_t) - Cxyt - Cxyt.T + CxxtCt, LRinv))
# Transition Gradients
        pbar = zip(forward_messages[0:-1], backward_messages[1:], observations, z)
        if tqdm is not None:
            pbar = tqdm(pbar)
            pbar.set_description("transition gradient loglike")
        for t, (forward_t, backward_t, y_t, z_t) in enumerate(pbar):
weight_t = 1.0 if weights is None else weights[t]
# Pr(x_t, x_t+1 | y)
c_mean_precision = \
np.concatenate([
forward_t['x']['mean_precision'],
backward_t['x']['mean_precision'] + np.dot(RinvC.T,y_t)
])
c_precision = \
np.block([
[forward_t['x']['precision'] + AtQinvA[z_t],
-QinvA[z_t].T],
[-QinvA[z_t],
backward_t['x']['precision'] + CtRinvC + Qinv[z_t]]
])
c_mean = np.linalg.solve(c_precision, c_mean_precision)
c_cov = np.linalg.inv(c_precision)
xp_mean = c_mean[0:self.n]
xn_mean = c_mean[self.n:]
xpxpt_mean = c_cov[0:self.n, 0:self.n] + np.outer(xp_mean, xp_mean)
xnxpt_mean = c_cov[self.n:, 0:self.n] + np.outer(xn_mean, xp_mean)
xnxnt_mean = c_cov[self.n:, self.n:] + np.outer(xn_mean, xn_mean)
# Gradient of A
grad['A'][z_t] += weight_t * (np.dot(Qinv[z_t],
xnxpt_mean - np.dot(A[z_t],xpxpt_mean)))
# Gradient of LQinv
Axpxnt = np.dot(A[z_t], xnxpt_mean.T)
AxpxptAt = np.dot(A[z_t], np.dot(xpxpt_mean, A[z_t].T))
grad['LQinv'][z_t] += weight_t * (LQinv_diaginv[z_t] + \
-1.0*np.dot(xnxnt_mean - Axpxnt - Axpxnt.T + AxpxptAt,
LQinv[z_t]))
# Latent State Gradients
z_prev = forward_message.get('z_prev') if forward_message is not None else None
for t, z_t in enumerate(z):
weight_t = 1.0 if weights is None else weights[t]
if z_prev is not None:
if parameters.pi_type == "logit":
logit_pi_grad_t = -Pi[z_prev] + 0.0
logit_pi_grad_t[z_t] += 1.0
grad['logit_pi'][z_prev] += weight_t * logit_pi_grad_t
elif parameters.pi_type == "expanded":
expanded_pi_grad_t = - Pi[z_prev] / expanded_pi[z_prev]
expanded_pi_grad_t[z_t] += 1.0 / expanded_pi[z_prev, z_t]
grad['expanded_pi'][z_prev] += weight_t * expanded_pi_grad_t
z_prev = z_t
grad['LQinv_vec'] = np.array([grad_LQinv_k[np.tril_indices(self.n)]
for grad_LQinv_k in grad.pop('LQinv')])
grad['LRinv_vec'] = grad.pop('LRinv')[np.tril_indices(self.m)]
return grad
def _x_predictive_loglikelihood(self, observations, z, parameters, lag=10,
forward_message=None, backward_message=None, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# Calculate Filtered
if lag == 0:
forward_messages = self.forward_pass(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
**kwargs)
else:
forward_messages = self.forward_pass(
observations=observations[0:-lag],
z=z[0:-lag],
parameters=parameters,
forward_message=forward_message,
**kwargs)
loglike = 0.0
A = parameters.A
Q = parameters.Q
C = parameters.C
R = parameters.R
for t in range(lag, np.shape(observations)[0]):
y_cur = observations[t]
z_cur = z[t]
# Calculate Pr(x_t | y_{<=t-lag}, theta)
mean_precision = forward_messages[t-lag]['x']['mean_precision']
precision = forward_messages[t-lag]['x']['precision']
mean = np.linalg.solve(precision, mean_precision)
var = np.linalg.inv(precision)
for l in range(lag):
mean = np.dot(A[z_cur], mean)
var = np.dot(A[z_cur], np.dot(var, A[z_cur].T)) + Q[z_cur]
y_mean = np.dot(C, mean)
y_var = np.dot(C, np.dot(var, C.T)) + R
log_like_t = -0.5 * np.dot(y_cur - y_mean,
np.linalg.solve(y_var, y_cur - y_mean)) + \
-0.5 * np.linalg.slogdet(y_var)[1] + \
-0.5 * self.m * np.log(2*np.pi)
loglike += log_like_t
return loglike
def _x_latent_var_sample(self, observations, z, parameters,
forward_message=None, backward_message=None,
distribution='smoothed', tqdm=None):
""" Sample latent vars from observations
Args:
            observations (ndarray): num_obs by m observations
            z (ndarray): num_obs latent states
            parameters (SLDSParameters): parameters
            forward_message (dict): alpha message
            backward_message (dict): beta message
            distribution (string): 'smoothed', 'filtered', 'predictive'
smoothed: sample X from Pr(X | Y, theta)
filtered: sample X_t from Pr(X_t | Y_<=t, theta) iid for all t
predictive: sample X_t from Pr(X_t | Y_<t, theta) iid for all t
Returns
x (ndarray): num_obs sampled latent values (in R^n)
"""
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
L = np.shape(observations)[0]
if distribution == 'smoothed':
# Forward Pass
forward_messages = self.forward_pass(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
include_init_message=False,
tqdm=tqdm
)
# Backward Sampler
x = np.zeros((L, self.n))
x_cov = np.linalg.inv(forward_messages[-1]['x']['precision'])
x_mean = np.dot(x_cov, forward_messages[-1]['x']['mean_precision'])
x[-1, :] = np.random.multivariate_normal(mean=x_mean, cov=x_cov)
pbar = reversed(range(L-1))
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("backward smoothed sampling x")
for t in pbar:
x_next = x[t+1,:]
z_next = z[t+1]
x_cov = np.linalg.inv(forward_messages[t]['x']['precision'] +
AtQinvA[z_next])
x_mean = np.dot(x_cov,
forward_messages[t]['x']['mean_precision'] +
np.dot(AtQinv[z_next], x_next))
x[t,:] = np.random.multivariate_normal(x_mean, x_cov)
return x
elif distribution == 'filtered':
# Forward Pass (not a valid probability density)
forward_messages = self.forward_pass(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
include_init_message=False,
tqdm=tqdm
)
# Backward Sampler
x = np.zeros((L, self.n))
pbar = range(L)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("filtered sampling x")
for t in pbar:
x_cov = np.linalg.inv(forward_messages[t]['x']['precision'])
x_mean = np.dot(x_cov,
forward_messages[t]['x']['mean_precision'])
x[t,:] = np.random.multivariate_normal(x_mean, x_cov)
return x
elif distribution == 'predictive':
# Forward Sampler (not a valid probability density)
forward_messages = self.forward_pass(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
include_init_message=True,
tqdm=tqdm
)
# Backward Sampler
x = np.zeros((L, self.n))
Q = parameters.Q
pbar = range(L)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("predictive sampling x")
for t in pbar:
z_cur = z[t]
x_prev_cov = np.linalg.inv(
forward_messages[t]['x']['precision'])
x_prev_mean = np.dot(x_prev_cov,
forward_messages[t]['x']['mean_precision'])
x_cov = np.dot(A[z_cur],
np.dot(x_prev_cov, A[z_cur].T)) + Q[z_cur]
x_mean = np.dot(A[z_cur], x_prev_mean)
x[t,:] = np.random.multivariate_normal(x_mean, x_cov)
return x
else:
raise ValueError("Invalid `distribution'; {0}".format(distribution))
return
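    # Illustrative note (added for exposition): the 'smoothed' branch of
    # `_x_latent_var_sample` is a forward-filter backward-sampler (FFBS). In
    # information form, the backward draw of x_t given x_{t+1} = x' in regime
    # z' uses
    #     precision = filtered_precision_t + A[z']^T Qinv[z'] A[z']
    #     mean = precision^{-1} (filtered_mean_precision_t + A[z']^T Qinv[z'] x')
    # which is exactly the (x_cov, x_mean) pair computed in its backward loop.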
## Helper Functions conditioned on x
def _z_forward_messages(self, observations, x, parameters, forward_message,
weights=None, tqdm=None, only_return_last=False):
        # Return list of forward messages Pr(z_{t} | y_{<=t}, x)
# y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
forward_messages = [None]*(num_obs+1)
forward_messages[0] = forward_message
prob_vector = forward_message['z']['prob_vector']
log_constant = forward_message['z']['log_constant']
x_prev = forward_message.get('x_prev', None)
Pi = parameters.pi
C = parameters.C
LRinv = parameters.LRinv
pbar = range(num_obs)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("forward messages")
for t in pbar:
y_cur = observations[t]
x_cur = x[t]
weight_t = 1.0 if weights is None else weights[t]
# Log Pr(Y | X)
LRinvTymCx = np.dot(LRinv.T, y_cur - np.dot(C, x_cur))
log_constant += weight_t * (
-0.5 * self.m * np.log(2*np.pi) + \
-0.5*np.dot(LRinvTymCx, LRinvTymCx) + \
np.sum(np.log(np.diag(LRinv)))
)
if x_prev is None:
# Assume Non-informative prior for y_0
prob_vector = np.dot(prob_vector, Pi)
else:
# Log Pr(X | X_prev)
P_t, log_t = self._likelihoods(
x_cur, x_prev, parameters)
prob_vector = np.dot(prob_vector, Pi)
prob_vector = prob_vector * P_t
log_constant += weight_t * (log_t + np.log(np.sum(prob_vector)))
prob_vector = prob_vector/np.sum(prob_vector)
# Save Messages
x_prev = x_cur
if not only_return_last:
forward_messages[t+1] = dict(
z={
'prob_vector': prob_vector,
'log_constant': log_constant,
},
x_prev=x_prev,
)
if only_return_last:
last_message = dict(
z={
'prob_vector': prob_vector,
'log_constant': log_constant,
},
x_prev=x_prev,
)
return last_message
else:
return forward_messages
def _z_backward_messages(self, observations, x, parameters,
backward_message, weights=None, tqdm=None, only_return_last=False):
        # Return list of backward messages Pr(y_{>t} | z_t, x)
# y is num_obs x n matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
backward_messages = [None]*(num_obs+1)
backward_messages[-1] = backward_message
prob_vector = backward_message['z']['likelihood_vector']
log_constant = backward_message['z']['log_constant']
x_next = backward_message.get('x_next', None)
Pi = parameters.pi
C = parameters.C
LRinv = parameters.LRinv
pbar = reversed(range(num_obs))
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("backward messages")
for t in pbar:
y_cur = observations[t]
x_cur = x[t]
weight_t = 1.0 if weights is None else weights[t]
# Log Pr(Y_cur | X_cur )
LRinvTymCx = np.dot(LRinv.T, y_cur - np.dot(C, x_cur))
log_constant += weight_t * (
-0.5 * self.m * np.log(2*np.pi) + \
-0.5*np.dot(LRinvTymCx, LRinvTymCx) + \
np.sum(np.log(np.diag(LRinv)))
)
if x_next is None:
prob_vector = np.dot(Pi, prob_vector)
log_constant += weight_t * np.log(np.sum(prob_vector))
prob_vector = prob_vector/np.sum(prob_vector)
else:
# Log Pr(X_next | X_cur)
P_t, log_t = self._likelihoods(
x_next, x_cur, parameters)
prob_vector = P_t * prob_vector
prob_vector = np.dot(Pi, prob_vector)
log_constant += weight_t * (log_t + \
np.log(np.sum(prob_vector)))
prob_vector = prob_vector/np.sum(prob_vector)
# Save Messages
x_next = x_cur
if not only_return_last:
backward_messages[t] = dict(z={
'likelihood_vector': prob_vector,
'log_constant': log_constant,
}, x_next=x_next)
if only_return_last:
last_message = dict(z={
'likelihood_vector': prob_vector,
'log_constant': log_constant,
}, x_next=x_next)
return last_message
else:
return backward_messages
def _z_marginal_loglikelihood(self, observations, x, parameters,
forward_message=None, backward_message=None, weights=None,
**kwargs):
# Run forward pass + combine with backward pass
# y is num_obs x m matrix
# forward_pass is Pr(z_{T-1} | x_{<=T-1}, y_{<=T-1})
forward_pass = self._forward_message(
observations=observations,
x=x,
parameters=parameters,
forward_message=forward_message,
weights=weights,
**kwargs)
Pi = parameters.pi
x_prev = forward_pass.get('x_prev')
x_cur = backward_message.get('x_next')
prob_vector = forward_pass['z']['prob_vector']
log_constant = forward_pass['z']['log_constant']
prob_vector = np.dot(prob_vector, Pi)
weight_T = 1.0 if weights is None else weights[-1]
if (x_cur is not None) and (x_prev is not None):
P_t, log_t = self._likelihoods(
x_cur, x_prev, parameters,
)
prob_vector = P_t * prob_vector
log_constant += weight_T * log_t
log_constant += weight_T * backward_message['z']['log_constant']
likelihood = np.dot(prob_vector,
backward_message['z']['likelihood_vector'])
loglikelihood = weight_T * np.log(likelihood) + log_constant
return loglikelihood
def _z_gradient_marginal_loglikelihood(self, observations, x, parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None):
Pi, expanded_pi = parameters.pi, parameters.expanded_pi
A, LQinv, C, LRinv = \
parameters.A, parameters.LQinv, parameters.C, parameters.LRinv
Rinv = parameters.Rinv
Q, Qinv = parameters.Q, parameters.Qinv
LRinv_Tinv = lower_tri_mat_inv(LRinv).T
# Forward Pass
# forward_messages = [Pr(z_{t} | y_{-inf:t}, x_{-inf:t})]
# for t=-1:T-1
forward_messages = self.forward_pass(
observations=observations,
x=x,
parameters=parameters,
forward_message=forward_message,
include_init_message=True)
# Backward Pass
# backward_messages = [Pr(y_{t:inf}, x_{t:inf} | z_{t}), x_{t}]
# for t=0:T
backward_messages = self.backward_pass(
observations=observations,
x=x,
parameters=parameters,
backward_message=backward_message,
include_init_message=True)
# Gradients
grad = {var: np.zeros_like(value)
for var, value in parameters.as_dict().items()}
grad['LQinv'] = np.zeros_like(parameters.LQinv)
grad['LRinv'] = np.zeros_like(parameters.LRinv)
# Transition Gradients
        pbar = zip(forward_messages[:-1], backward_messages[1:], observations, x)
        if tqdm is not None:
            pbar = tqdm(pbar)
            pbar.set_description("gradient loglike")
        for t, (forward_t, backward_t, y_t, x_t) in enumerate(pbar):
# r_t is Pr(z_{t-1} | y_{< t})
# s_t is Pr(z_t | y_{< t})
# q_t is Pr(y_{> t} | z_t)
r_t = forward_t['z']['prob_vector']
s_t = np.dot(r_t, Pi)
q_t = backward_t['z']['likelihood_vector']
weight_t = 1.0 if weights is None else weights[t]
x_prev = forward_t.get('x_prev', None)
x_cur = x_t
if (x_prev is not None) and (x_cur is not None):
P_t, _ = self._likelihoods(
x_cur, x_prev, parameters
)
else:
P_t = np.ones(self.num_states)
# Marginal + Pairwise Marginal
joint_post = np.diag(r_t).dot(Pi).dot(np.diag(P_t*q_t))
joint_post = joint_post/np.sum(joint_post)
marg_post = np.sum(joint_post, axis=0)
# Grad for pi
if parameters.pi_type == "logit":
# Gradient of logit_pi
grad['logit_pi'] += weight_t * (joint_post - \
np.diag(np.sum(joint_post, axis=1)).dot(Pi))
elif parameters.pi_type == "expanded":
grad['expanded_pi'] += weight_t * np.array([
(expanded_pi[k]**-1)*(
joint_post[k] - np.sum(joint_post[k])*Pi[k])
for k in range(self.num_states)
])
else:
                raise RuntimeError(
                        "Unrecognized pi_type: {0}".format(parameters.pi_type))
# Grad for A and LQinv
x_prev = forward_t.get('x_prev', None)
if (x_prev is not None) and (x_cur is not None):
for k, A_k, LQinv_k, Qinv_k, Q_k in zip(
range(self.num_states), A, LQinv, Qinv, Q):
diff_k = x_cur - A_k.dot(x_prev)
                    # accumulate over time steps (`=` here would keep only the
                    # last step's contribution)
                    grad['A'][k] += (
                        np.outer(Qinv_k.dot(diff_k), x_prev)
                        ) * marg_post[k] * weight_t
                    grad['LQinv'][k] += (
                        (Q_k - np.outer(diff_k, diff_k)).dot(LQinv_k)
                        ) * marg_post[k] * weight_t
# Grad for C and LRinv
grad['C'] += weight_t * np.outer(np.dot(Rinv, y_t-np.dot(C,x_t)), x_t)
grad['LRinv'] += weight_t * (LRinv_Tinv + \
-1.0*np.dot(np.outer(y_t-np.dot(C,x_t), y_t-np.dot(C,x_t)),
LRinv))
grad['LQinv_vec'] = np.array([grad_LQinv_k[np.tril_indices(self.n)]
for grad_LQinv_k in grad.pop('LQinv')])
grad['LRinv_vec'] = grad.pop('LRinv')[np.tril_indices(self.m)]
return grad
def _z_predictive_loglikelihood(self, observations, x, parameters, lag=10,
forward_message=None, backward_message=None, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# Calculate Filtered
forward_messages = self.forward_pass(
observations=observations[0:-lag],
parameters=parameters,
x=x,
forward_message=forward_message, **kwargs)
loglike = 0.0
Pi = parameters.pi
for t in range(lag, np.shape(observations)[0]):
# Calculate Pr(z_t | y_{<=t-lag}, theta)
prob_vector = forward_messages[t-lag]['z']['prob_vector']
for l in range(lag):
prob_vector = np.dot(prob_vector, Pi)
x_cur = x[t]
x_prev = x[t-1]
P_t, log_constant = self._likelihoods(x_cur, x_prev, parameters)
likelihood = np.dot(prob_vector, P_t)
loglike += np.log(likelihood) + log_constant
return loglike
def _z_latent_var_sample(self, observations, x, parameters,
forward_message=None, backward_message=None,
distribution='smoothed', tqdm=None):
""" Sample latent vars from observations
Args:
observations (ndarray): num_obs by m observations
x (ndarray): num_obs by n latent continuous variables
parameters (LGSSMParameters): parameters
forward_message (dict): alpha message
backward_message (dict): beta message
            distribution (string): 'smoothed', 'filtered', or 'predictive'
                smoothed: sample z from Pr(z | Y, theta)
                filtered: sample z_t from Pr(z_t | Y_<=t, theta) iid for all t
                predictive: sample z_t from Pr(z_t | Y_<t, theta) iid for all t
        Returns:
            z (ndarray): num_obs sampled latent values (in {0,...,num_states-1})
"""
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
Pi = parameters.pi
L = np.shape(observations)[0]
z = np.zeros(L, dtype=int)
if np.shape(x)[0] != L:
raise ValueError('observations and x have different shapes')
if distribution == 'smoothed':
# Backward Pass
backward_messages = self.backward_pass(
observations=observations,
x=x,
parameters=parameters,
backward_message=backward_message,
tqdm=tqdm
)
# Forward Sampler
pbar = enumerate(backward_messages)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("forward smoothed sampling z")
x_prev = forward_message.get('x_prev', None)
for t, backward_t in pbar:
x_cur = x[t]
if t == 0:
post_t = np.dot(forward_message['z']['prob_vector'], Pi)
else:
post_t = Pi[z[t-1]]
if x_prev is not None:
P_t, _ = self._likelihoods(
x_cur, x_prev, parameters,
)
post_t = post_t * P_t * backward_t['z']['likelihood_vector']
post_t = post_t/np.sum(post_t)
x_prev = x_cur
z[t] = random_categorical(post_t)
elif distribution == 'filtered':
# Forward Sampler (not a valid probability density)
x_prev = forward_message.get('x_prev', None)
pbar = enumerate(x)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("forward filtered sampling z")
for t, x_cur in pbar:
if t == 0:
post_t = np.dot(forward_message['z']['prob_vector'], Pi)
else:
post_t = Pi[z[t-1]]
if x_prev is not None:
P_t, _ = self._likelihoods(
x_cur, x_prev, parameters,
)
post_t = post_t * P_t
post_t = post_t/np.sum(post_t)
x_prev = x_cur
z[t] = random_categorical(post_t)
elif distribution == 'predictive':
# Forward Sampler (not a valid probability density)
x_prev = forward_message.get('x_prev', None)
pbar = range(np.shape(x)[0])
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("forward filtered sampling z")
for t in pbar:
if t == 0:
prob_vector = np.dot(forward_message['z']['prob_vector'],
Pi)
else:
x_cur = x[t-1]
if x_prev is not None:
P_t, _ = self._likelihoods(
x_cur, x_prev, parameters,
)
prob_vector = prob_vector * P_t
prob_vector = np.dot(prob_vector, Pi)
x_prev = x_cur
prob_vector = prob_vector/np.sum(prob_vector)
z[t] = random_categorical(prob_vector)
else:
raise ValueError("Unrecognized distr {0}".format(distr))
return z
def _likelihoods(self, x_cur, x_prev, parameters):
#if (x_prev is None) or (x_cur is None):
# return np.ones(self.num_states), 0.0
logP_t = self._ar_loglikelihoods(
x_cur=x_cur, x_prev=x_prev, parameters=parameters,
)
log_constant = np.max(logP_t)
logP_t = logP_t - log_constant
P_t = np.exp(logP_t)
return P_t, log_constant
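    # Note (added comment): `_likelihoods` returns the per-state transition
    # densities factored as P_t * exp(log_constant), where log_constant is
    # the max of the state log-densities. This is the usual max-shift trick
    # that keeps np.exp from underflowing: e.g. logP_t = [-1000, -1001]
    # yields P_t = [1.0, np.exp(-1)] with log_constant = -1000, so callers
    # accumulate log_constant additively instead of exponentiating directly.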
def _ar_loglikelihoods(self, x_cur, x_prev, parameters):
        # x_cur and x_prev are n-dimensional latent state vectors
loglikelihoods = np.zeros(self.num_states, dtype=float)
for k, (A_k, LQinv_k) in enumerate(
zip(parameters.A, parameters.LQinv)):
delta = x_cur - np.dot(A_k, x_prev)
LQinvTdelta = np.dot(delta, LQinv_k)
loglikelihoods[k] = \
-0.5 * np.dot(LQinvTdelta, LQinvTdelta) + \
                -0.5 * self.n * np.log(2*np.pi) + \
np.sum(np.log(np.diag(LQinv_k)))
return loglikelihoods
def _complete_data_loglikelihood(self, observations, x, z, parameters,
forward_message=None, backward_message=None, weights=None,
**kwargs):
# y is num_obs x m matrix
log_constant = 0.0
Pi = parameters.pi
A = parameters.A
LQinv = parameters.LQinv
C = parameters.C
LRinv = parameters.LRinv
z_prev = forward_message.get('z_prev')
x_prev = forward_message.get('x_prev')
for t, (y_t, x_t, z_t) in enumerate(zip(observations, x, z)):
weight_t = 1.0 if weights is None else weights[t]
# Pr(Z_t | Z_t-1)
if z_prev is not None:
log_constant += weight_t * np.log(Pi[z_prev, z_t])
# Pr(X_t | X_t-1)
if (z_prev is not None) and (x_prev is not None):
diffLQinv = np.dot(x_t - np.dot(A[z_t],x_prev), LQinv[z_t])
log_constant += weight_t * (
-0.5 * self.n * np.log(2*np.pi) + \
-0.5 * np.dot(diffLQinv, diffLQinv) + \
np.sum(np.log(np.diag(LQinv[z_t])))
)
# Pr(Y_t | X_t)
LRinvTymCx = np.dot(LRinv.T, y_t - np.dot(C, x_t))
log_constant += weight_t * (
-0.5 * self.m * np.log(2*np.pi) + \
-0.5*np.dot(LRinvTymCx, LRinvTymCx) + \
np.sum(np.log(np.diag(LRinv)))
)
z_prev = z_t
x_prev = x_t
return log_constant
def _gradient_complete_data_loglikelihood(self, observations, x, z,
parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None):
if forward_message is None:
forward_message = {}
if backward_message is None:
backward_message = {}
Pi, expanded_pi = parameters.pi, parameters.expanded_pi
A, LQinv = parameters.A, parameters.LQinv
Qinv = parameters.Qinv
C, LRinv = parameters.C, parameters.LRinv
Rinv = parameters.Rinv
LRinv_Tinv = lower_tri_mat_inv(LRinv).T
LQinv_Tinv = np.array([lower_tri_mat_inv(LQinv[k]).T
for k in range(parameters.num_states)])
# Gradients
grad = {var: np.zeros_like(value)
for var, value in parameters.as_dict().items()}
grad['LQinv'] = np.zeros_like(parameters.LQinv)
grad['LRinv'] = np.zeros_like(parameters.LRinv)
# Latent State Gradients
z_prev = forward_message.get('z_prev')
for t, z_t in enumerate(z):
weight_t = 1.0 if weights is None else weights[t]
if z_prev is not None:
if parameters.pi_type == "logit":
logit_pi_grad_t = -Pi[z_prev] + 0.0
logit_pi_grad_t[z_t] += 1.0
grad['logit_pi'][z_prev] += weight_t * logit_pi_grad_t
elif parameters.pi_type == "expanded":
expanded_pi_grad_t = - Pi[z_prev] / expanded_pi[z_prev]
expanded_pi_grad_t[z_t] += 1.0 / expanded_pi[z_prev, z_t]
grad['expanded_pi'][z_prev] += weight_t * expanded_pi_grad_t
z_prev = z_t
# Transition Gradients
x_prev = forward_message.get('x_prev')
for t, (x_t, z_t) in enumerate(zip(x, z)):
weight_t = 1.0 if weights is None else weights[t]
if x_prev is not None:
A_k = A[z_t]
diff = x_t - np.dot(A_k, x_prev)
grad['A'][z_t] += weight_t * np.outer(
np.dot(Qinv[z_t], diff), x_prev)
grad['LQinv'][z_t] += weight_t * (LQinv_Tinv[z_t] + \
-1.0*np.dot(np.outer(diff, diff), LQinv[z_t]))
x_prev = x_t
# Emission Gradients
for t, (x_t, y_t) in enumerate(zip(x, observations)):
weight_t = 1.0 if weights is None else weights[t]
diff = y_t - np.dot(C, x_t)
grad['C'] += weight_t * np.outer(np.dot(Rinv, diff), x_t)
grad['LRinv'] += weight_t * (LRinv_Tinv + \
-1.0*np.dot(np.outer(diff, diff), LRinv))
grad['LQinv_vec'] = np.array([grad_LQinv_k[np.tril_indices(self.n)]
for grad_LQinv_k in grad.pop('LQinv')])
grad['LRinv_vec'] = grad.pop('LRinv')[np.tril_indices(self.m)]
return grad
## Loglikelihood + Gradient Functions
def marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, x=None, z=None,
**kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
if (z is not None) and (x is not None):
return self._complete_data_loglikelihood(
observations=observations,
x=x, z=z,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
elif (z is not None) and (x is None):
return self._x_marginal_loglikelihood(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
elif (z is None) and (x is not None):
return self._z_marginal_loglikelihood(
observations=observations,
x=x,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
else:
raise ValueError("Cannot marginalize both x and z")
def gradient_marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None,
x=None, z=None, **kwargs):
if (z is not None) and (x is not None):
return self._gradient_complete_data_loglikelihood(
observations=observations,
x=x,
z=z,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
elif (z is not None) and (x is None):
return self._x_gradient_marginal_loglikelihood(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
elif (z is None) and (x is not None):
return self._z_gradient_marginal_loglikelihood(
observations=observations,
x=x,
parameters=parameters,
forward_message=forward_message,
backward_message=backward_message,
**kwargs)
else:
raise ValueError("Cannot marginalize both x and z")
## Gibbs Functions
def calc_gibbs_sufficient_statistic(self, observations, latent_vars,
**kwargs):
""" Gibbs Sample Sufficient Statistics
Args:
observations (ndarray): num_obs observations
latent_vars (dict): latent vars
Returns:
sufficient_stat (dict)
"""
y = observations
x = latent_vars['x']
z = latent_vars['z']
# Sufficient Statistics for Pi
z_pair_count = np.zeros((self.num_states, self.num_states))
for t in range(1, np.size(z)):
z_pair_count[z[t-1], z[t]] += 1.0
# Sufficient Statistics for A and Q
# From Emily Fox's Thesis Page 147
transition_count = np.zeros(self.num_states)
Sx_prevprev = np.zeros((self.num_states, self.n, self.n))
Sx_curprev = np.zeros((self.num_states, self.n, self.n))
Sx_curcur = np.zeros((self.num_states, self.n, self.n))
for k in range(0, self.num_states):
transition_count[k] = np.sum(z == k)
# Construct Psi & Psi_prev Matrices
if np.sum(z[1:] == k) == 0:
# No Sufficient Statistics for No Observations
continue
PsiT = x[1:][z[1:]==k,:]
PsiT_prev = x[:-1][z[1:]==k,:]
# Sufficient Statistics for group k
Sx_prevprev[k] = PsiT_prev.T.dot(PsiT_prev)
Sx_curprev[k] = PsiT.T.dot(PsiT_prev)
Sx_curcur[k] = PsiT.T.dot(PsiT)
# Sufficient Statistics for C and R
# From Emily Fox's Thesis Page 147
PsiT = y
PsiT_prev = x
emission_count = len(PsiT)
S_prevprev = PsiT_prev.T.dot(PsiT_prev)
S_curprev = PsiT.T.dot(PsiT_prev)
S_curcur = PsiT.T.dot(PsiT)
# Return sufficient Statistics
sufficient_stat = {}
sufficient_stat['pi'] = dict(alpha = z_pair_count)
sufficient_stat['A'] = dict(
S_prevprev = Sx_prevprev,
S_curprev = Sx_curprev,
)
sufficient_stat['Q'] = dict(
S_count=transition_count,
S_prevprev = Sx_prevprev,
S_curprev = Sx_curprev,
S_curcur=Sx_curcur,
)
sufficient_stat['C'] = dict(
S_prevprev = S_prevprev,
S_curprev = S_curprev,
)
sufficient_stat['R'] = dict(
S_count=emission_count,
S_prevprev = S_prevprev,
S_curprev = S_curprev,
S_curcur=S_curcur,
)
return sufficient_stat
## Predict Functions
# NotImplemented
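    # Usage sketch (illustrative comments, not part of the API): the
    # loglikelihood methods above dispatch on which latent variables are
    # supplied. Assuming `helper` is a constructed SLDSHelper, `parameters`
    # an SLDSParameters instance, and `data` the output of generate_slds_data:
    #
    #   y = data['observations']
    #   x = data['latent_vars']['x']
    #   z = data['latent_vars']['z']
    #   helper.marginal_loglikelihood(y, parameters, x=x, z=z)  # complete data
    #   helper.marginal_loglikelihood(y, parameters, z=z)       # x marginalized out
    #   helper.marginal_loglikelihood(y, parameters, x=x)       # z marginalized out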
| 53,484 | 39.153904 | 103 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/slds/parameters.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...base_parameters import (
BaseParameters, BasePrior, BasePreconditioner,
)
from ...variables import (
TransitionMatrixParamHelper, TransitionMatrixPriorHelper,
TransitionMatrixPrecondHelper,
SquareMatricesParamHelper, SquareMatricesPriorHelper,
SquareMatricesPrecondHelper,
RectMatrixParamHelper, RectMatrixPriorHelper,
RectMatrixPrecondHelper,
CovariancesParamHelper, CovariancesPriorHelper,
CovariancesPrecondHelper,
CovarianceParamHelper, CovariancePriorHelper,
CovariancePrecondHelper,
)
from ..._utils import (
random_categorical,
var_stationary_precision,
)
class SLDSParameters(BaseParameters):
""" SLDS Parameters """
_param_helper_list = [
TransitionMatrixParamHelper(name='pi', dim_names=['num_states', 'pi_type']),
SquareMatricesParamHelper(name='A', dim_names=['n', 'num_states']),
CovariancesParamHelper(name='Q', dim_names=['n', 'num_states']),
RectMatrixParamHelper(name='C', dim_names=['m', 'n']),
CovarianceParamHelper(name='R', dim_names=['m']),
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "SLDSParameters:"
my_str += "\npi:\n" + str(self.pi)
my_str += "\npi_type: `" + str(self.pi_type) + "`"
my_str += "\nA:\n" + str(self.A)
my_str += "\nC:\n" + str(self.C)
my_str += "\nQ:\n" + str(self.Q)
my_str += "\nR:\n" + str(self.R)
return my_str
def project_parameters(self, **kwargs):
if 'C' not in kwargs:
kwargs['C'] = dict(fixed_eye=True)
return super().project_parameters(**kwargs)
class SLDSPrior(BasePrior):
""" SLDS Prior
See individual Prior Mixins for details
"""
_Parameters = SLDSParameters
_prior_helper_list = [
CovariancePriorHelper(name='R', dim_names=['m'],
matrix_name='C'),
CovariancesPriorHelper(name='Q', dim_names=['n', 'num_states'],
matrix_name='A'),
TransitionMatrixPriorHelper(name='pi', dim_names=['num_states']),
SquareMatricesPriorHelper(name='A', dim_names=['n', 'num_states'],
var_row_name='Q'),
RectMatrixPriorHelper(name='C', dim_names=['m', 'n'],
var_row_name='R'),
]
class SLDSPreconditioner(BasePreconditioner):
""" SLDS Preconditioner
See individual Precondition Mixins for details
"""
_precond_helper_list = [
TransitionMatrixPrecondHelper(name='pi', dim_names=['num_states']),
SquareMatricesPrecondHelper(name='A', dim_names=['n', 'num_states'],
var_row_name='Q'),
CovariancesPrecondHelper(name='Q', dim_names=['n', 'num_states']),
RectMatrixPrecondHelper(name='C', dim_names=['m', 'n'],
var_row_name='R'),
CovariancePrecondHelper(name='R', dim_names=['m']),
]
def generate_slds_data(T, parameters, initial_message = None,
tqdm=None):
""" Helper function for generating SLDS time series
Args:
T (int): length of series
        parameters (SLDSParameters): parameters
        initial_message (dict): prior message for the initial latent state
Returns:
data (dict): dictionary containing:
observations (ndarray): T by m
latent_vars (dict):
'x': continuous ndarray T by n
'z': discrete ndarray values in {0,...,num_states-1}
            parameters (SLDSParameters)
            initial_message (dict)
"""
num_states = np.shape(parameters.pi)[0]
m, n = np.shape(parameters.C)
Pi = parameters.pi
A = parameters.A
C = parameters.C
Q = parameters.Q
R = parameters.R
if initial_message is None:
init_precision = np.mean([var_stationary_precision(Qinv_k, A_k, 10)
for Qinv_k, A_k in zip(parameters.Qinv, parameters.A)],
axis=0)
initial_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(n),
'precision': init_precision,
},
'z': {
'log_constant': 0.0,
'prob_vector': np.ones(num_states)/num_states,
},
}
latent_vars = {
'x': np.zeros((T, n)),
'z': np.zeros((T), dtype=int),
}
obs_vars = np.zeros((T, m))
latent_prev_z = random_categorical(initial_message['z']['prob_vector'])
latent_prev_x = np.random.multivariate_normal(
mean=np.linalg.solve(initial_message['x']['precision'],
initial_message['x']['mean_precision']),
cov=np.linalg.inv(initial_message['x']['precision']),
)
pbar = range(T)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("generating data")
for t in pbar:
latent_z = random_categorical(Pi[latent_prev_z])
latent_x = np.random.multivariate_normal(
mean=np.dot(A[latent_z], latent_prev_x),
cov=Q[latent_z],
)
obs_vars[t] = np.random.multivariate_normal(
mean=np.dot(C, latent_x),
cov=R,
)
latent_vars['z'][t] = latent_z
latent_vars['x'][t,:] = latent_x
latent_prev_z = latent_z
latent_prev_x = latent_x
data = dict(
observations=obs_vars,
latent_vars=latent_vars,
parameters=parameters,
initial_message=initial_message,
)
return data
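# Usage sketch (illustrative): simulate an SLDS series. The prior call below
# assumes SLDSPrior inherits the `generate_default_prior` constructor used by
# the other model priors in this package; dimensions are placeholder values.
#
#   prior = SLDSPrior.generate_default_prior(num_states=2, n=2, m=2)
#   parameters = prior.sample_prior()
#   data = generate_slds_data(T=500, parameters=parameters)
#   y, z = data['observations'], data['latent_vars']['z']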
| 5,953 | 34.023529 | 88 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/slds/__init__.py | from .parameters import (
SLDSParameters,
SLDSPrior,
SLDSPreconditioner,
generate_slds_data,
)
from .helper import SLDSHelper
from .sampler import SLDSSampler
| 179 | 19 | 32 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/garch/kernels.py | import numpy as np
from ...particle_filters.kernels import Kernel
# GARCH Kernel:
class GARCHPriorKernel(Kernel):
# n = 2, first dimension is x_t, second is sigma2_t
def sample_x0(self, prior_mean, prior_var, N, **kwargs):
""" Initialize x_t
Returns:
x_t (N by n ndarray)
"""
x_t = np.zeros((N, 2))
x_t[:,0] = np.random.normal(
loc=prior_mean,
scale=np.sqrt(prior_var),
size=(N))
return x_t
def prior_log_density(self, x_t, x_next, **kwargs):
""" log density of prior kernel
Args:
x_t (N by n ndarray): x_t
x_next (N by n ndarray): x_{t+1}
Returns:
loglikelihoods (N ndarray): q(x_next | x_t, parameters)
                (ignores constants with respect to x_t & x_next)
"""
N = np.shape(x_next)[0]
sigma2_next = self.parameters.alpha + \
self.parameters.beta * x_t[:,0]**2 + \
self.parameters.gamma * x_t[:,1]
loglikelihoods = -0.5*x_next[:,0]**2/sigma2_next - \
0.5 * np.log(2.0*np.pi) - 0.5 * np.log(sigma2_next)
loglikelihoods = np.reshape(loglikelihoods, (N))
return loglikelihoods
def get_prior_log_density_max(self):
""" Return max value of log density based on current parameters
Returns max_{x,x'} log q(x | x', parameters)
"""
alpha = self.parameters.alpha
loglikelihood_max = -0.5*np.log(2.0*np.pi) - 0.5*np.log(alpha)
return loglikelihood_max
def rv(self, x_t, **kwargs):
""" Prior Kernel for GARCH
Sample x_{t+1} ~ Pr(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
N = np.shape(x_t)[0]
sigma2_next = self.parameters.alpha + \
self.parameters.beta * x_t[:,0]**2 + \
self.parameters.gamma * x_t[:,1]
x_next = np.zeros((N,2))
x_next[:,0] = np.sqrt(sigma2_next) * np.random.normal(size=N)
x_next[:,1] = sigma2_next
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Prior Kernel for GARCH
weight_t = Pr(y_{t+1} | x_{t+1}, parameters)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
diff = self.y_next - x_next[:,0]
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*(diff**2)*self.parameters.Rinv + \
np.log(self.parameters.LRinv)
log_weights = np.reshape(log_weights, (N))
return log_weights
class GARCHOptimalKernel(Kernel):
# n = 2, first dimension is x_t, second is sigma2_t
def sample_x0(self, prior_mean, prior_var, N, **kwargs):
""" Initialize x_t
Returns:
x_t (N by n ndarray)
"""
x_t = np.zeros((N, 2))
x_t[:,0] = np.random.normal(
loc=prior_mean,
scale=np.sqrt(prior_var),
size=(N))
return x_t
def prior_log_density(self, x_t, x_next, **kwargs):
""" log density of prior kernel
Args:
x_t (N by n ndarray): x_t
x_next (N by n ndarray): x_{t+1}
Returns:
loglikelihoods (N ndarray): q(x_next | x_t, parameters)
                (ignores constants with respect to x_t & x_next)
"""
N = np.shape(x_next)[0]
sigma2_next = self.parameters.alpha + \
self.parameters.beta * x_t[:,0]**2 + \
self.parameters.gamma * x_t[:,1]
loglikelihoods = -0.5*x_next[:,0]**2/sigma2_next - \
0.5 * np.log(2.0*np.pi) - 0.5 * np.log(sigma2_next)
loglikelihoods = np.reshape(loglikelihoods, (N))
return loglikelihoods
def get_prior_log_density_max(self):
""" Return max value of log density based on current parameters
Returns max_{x,x'} log q(x | x', parameters)
"""
alpha = self.parameters.alpha
loglikelihood_max = -0.5*np.log(2.0*np.pi) - 0.5*np.log(alpha)
return loglikelihood_max
def rv(self, x_t, **kwargs):
""" Optimal Kernel for GARCH
Sample x_{t+1} ~ Pr(x_{t+1} | x_t, y_{t+1}, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
N = np.shape(x_t)[0]
sigma2_next = self.parameters.alpha + \
self.parameters.beta * x_t[:,0]**2 + \
self.parameters.gamma * x_t[:,1]
x_next = np.zeros((N,2))
var_next = (self.parameters.Rinv + sigma2_next**-1)**-1
mean_next = var_next*(self.y_next * self.parameters.Rinv)
x_next[:,0] = mean_next + np.sqrt(var_next) * np.random.normal(size=N)
x_next[:,1] = sigma2_next
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Optimal Kernel for GARCH
weight_t = Pr(y_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
diff = self.y_next
var = x_next[:,1] + self.parameters.R
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*(diff**2)/var + \
-0.5*np.log(var)
log_weights = np.reshape(log_weights, (N))
return log_weights
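# Usage sketch (illustrative): one propagate/reweight step of a bootstrap
# particle filter. The pf wrappers in ...particle_filters normally set
# `parameters` (via set_parameters) and `y_next` on the kernel; they are
# assigned directly here for exposition. `x_t` is an N by 2 array of
# (x, sigma2) particles and `y`, `t` are assumed observation data and index.
#
#   kernel = GARCHPriorKernel()
#   kernel.set_parameters(parameters=parameters)
#   kernel.y_next = y[t+1]                # observation consumed by reweight()
#   x_next = kernel.rv(x_t)               # propagate particles
#   log_w = kernel.reweight(x_t, x_next)  # log importance weights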
| 5,770 | 30.535519 | 78 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/garch/sampler.py | import numpy as np
from ...sgmcmc_sampler import SGMCMCSampler, SeqSGMCMCSampler
from .parameters import GARCHPrior, GARCHParameters
from .helper import GARCHHelper
class GARCHSampler(SGMCMCSampler):
def __init__(self, n=1, m=1, observations=None, prior=None, parameters=None,
forward_message=None, name="GARCHSampler", **kwargs):
self.options = kwargs
self.n = n
self.m = m
self.name = name
self.setup(
observations=observations,
prior=prior,
parameters=parameters,
forward_message=forward_message,
)
return
def setup(self, observations, prior, parameters=None, forward_message=None):
""" Initialize the sampler
Args:
observations (ndarray): T by m ndarray of time series values
prior (GARCHPrior): prior
forward_message (ndarray): prior probability for latent state
parameters (GARCHParameters): initial parameters
(optional, will sample from prior by default)
"""
self.observations = observations
if prior is None:
prior = GARCHPrior.generate_default_prior(n=self.n, m=self.m)
self.prior = prior
if parameters is None:
self.parameters = self.prior.sample_prior()
else:
if not isinstance(parameters, GARCHParameters):
raise ValueError("parameters is not a GARCHParameter")
self.parameters = parameters
self.forward_message = forward_message
self.backward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
}
self.message_helper=GARCHHelper(
n=self.n,
m=self.m,
forward_message=forward_message,
backward_message=self.backward_message,
)
return
def sample_x(self, parameters=None, observations=None, tqdm=None,
num_samples=None, **kwargs):
""" Sample X """
raise NotImplementedError()
def sample_gibbs(self, tqdm=None):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (GARCHParameters): sampled parameters after one step
"""
raise NotImplementedError()
class SeqGARCHSampler(SeqSGMCMCSampler, GARCHSampler):
pass
| 2,499 | 30.25 | 80 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/garch/helper.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCHelper
from .kernels import GARCHPriorKernel, GARCHOptimalKernel
from ...particle_filters.buffered_smoother import (
buffered_pf_wrapper,
average_statistic,
)
class GARCHHelper(SGMCMCHelper):
""" GARCH Helper
forward_message (dict) with keys
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
backward_message (dict) with keys
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
"""
def __init__(self, n=1, m=1, forward_message=None, backward_message=None,
**kwargs):
self.n = n
self.m = m
self.default_forward_message=forward_message
if backward_message is None:
backward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
}
self.default_backward_message=backward_message
return
def _forward_messages(self, observations, parameters, forward_message,
weights=None, tqdm=None):
raise NotImplementedError('GARCH does not have analytic message passing')
def _backward_messages(self, observations, parameters, backward_message,
weights=None, tqdm=None):
raise NotImplementedError('GARCH does not have analytic message passing')
def _get_kernel(self, kernel):
if kernel is None:
kernel = "optimal"
if kernel == "prior":
Kernel = GARCHPriorKernel()
elif kernel == "optimal":
Kernel = GARCHOptimalKernel()
else:
raise ValueError("Unrecoginized kernel = {0}".format(kernel))
return Kernel
def pf_gradient_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Filter Score Estimate
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
weights (ndarray): weights for [subsequence_start, subsequence_end)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(X_t | X_{t-1})
"optimal" - bootstrap filter P(X_t | X_{t-1}, Y_t)
**kwargs - additional keyword args for individual filters
Return:
grad (dict): grad of variables in parameters
"""
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
prior_mean, prior_var = self._get_prior_x(forward_message, parameters)
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=garch_complete_data_loglike_gradient,
statistic_dim=4,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
**kwargs
)
score_estimate = average_statistic(out)
grad = dict(
LRinv_vec = score_estimate[0],
log_mu = score_estimate[1],
logit_phi = score_estimate[2],
logit_lambduh = score_estimate[3],
)
return grad
def pf_loglikelihood_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Filter Marginal Log-Likelihood Estimate
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
weights (ndarray): weights for [subsequence_start, subsequence_end)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(X_t | X_{t-1})
"optimal" - bootstrap filter P(X_t | X_{t-1}, Y_t)
**kwargs - additional keyword args for individual filters
Return:
            loglikelihood (double): marginal log likelihood estimate
"""
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
prior_mean, prior_var = self._get_prior_x(forward_message, parameters)
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=garch_sufficient_statistics,
statistic_dim=3,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
**kwargs
)
loglikelihood = out['loglikelihood_estimate']
return loglikelihood
def pf_predictive_loglikelihood_estimate(self, observations, parameters,
num_steps_ahead=5,
subsequence_start=0, subsequence_end=None,
pf="filter", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Filter Predictive Log-Likelihood Estimate
        Returns predictive log-likelihood for k = [0, 1, ..., num_steps_ahead]
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
num_steps_ahead (int): number of steps
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
N (int): number of particles used by particle filter
kernel (string): kernel to use
**kwargs - additional keyword args for individual filters
Return:
            loglikelihood (double): marginal log likelihood estimate
"""
if pf != "filter":
raise ValueError("Only can use pf = 'filter' since we are filtering")
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
prior_mean, prior_var = self._get_prior_x(forward_message, parameters)
from functools import partial
prior_kernel = self._get_kernel("prior")
prior_kernel.set_parameters(parameters=parameters)
additive_statistic_func = partial(garch_predictive_loglikelihood,
num_steps_ahead=num_steps_ahead,
observations=observations,
prior_kernel=prior_kernel,
)
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=additive_statistic_func,
statistic_dim=num_steps_ahead+1,
t1=subsequence_start,
tL=subsequence_end,
prior_mean=prior_mean,
prior_var=prior_var,
logsumexp=True,
**kwargs
)
predictive_loglikelihood = out['statistics']
predictive_loglikelihood[0] = out['loglikelihood_estimate']
return predictive_loglikelihood
def pf_latent_var_marginal(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
squared=False,
**kwargs):
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
prior_mean, prior_var = self._get_prior_x(forward_message, parameters)
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=garch_sufficient_statistics,
statistic_dim=3,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
elementwise_statistic=True,
**kwargs
)
avg_statistic = average_statistic(out)
avg_statistic = np.reshape(avg_statistic, (-1, 3))
if not squared:
x_mean = avg_statistic[:, 0]
x_cov = avg_statistic[:, 1] - x_mean**2
else:
x_mean = avg_statistic[:, 1]
x_cov = avg_statistic[:, 2] - x_mean**2
x_mean = np.reshape(x_mean, (x_mean.shape[0], 1))
x_cov = np.reshape(x_cov, (x_cov.shape[0], 1, 1))
return x_mean, x_cov
def pf_latent_var_distr(self, observations, parameters, lag=None,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
squared=False,
**kwargs):
if lag == 0 and pf != 'filter':
raise ValueError("pf must be filter for lag = 0")
elif lag is None and pf == 'filter':
raise ValueError("pf must not be filter for smoothing")
elif lag is not None and lag != 0:
raise NotImplementedError("lag can only be None or 0")
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
prior_mean, prior_var = self._get_prior_x(forward_message, parameters)
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=garch_sufficient_statistics,
statistic_dim=3,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
elementwise_statistic=True,
**kwargs
)
avg_statistic = average_statistic(out)
avg_statistic = np.reshape(avg_statistic, (-1, 3))
if not squared:
x_mean = avg_statistic[:, 0]
x_cov = avg_statistic[:, 1] - x_mean**2
else:
x_mean = avg_statistic[:, 1]
x_cov = avg_statistic[:, 2] - x_mean**2
x_mean = np.reshape(x_mean, (x_mean.shape[0], 1))
x_cov = np.reshape(x_cov, (x_cov.shape[0], 1, 1))
return x_mean, x_cov
def _get_prior_x(self, forward_message, parameters):
if forward_message is None:
prior_mean = 0
prior_var = parameters.alpha/(1-parameters.beta-parameters.gamma)
else:
prior_var = np.linalg.inv(forward_message['precision'])
prior_mean = np.linalg.solve(prior_var,
forward_message['mean_precision'])
return prior_mean, prior_var
# Additive Statistics
def garch_complete_data_loglike_gradient(x_t, x_next, y_next, parameters, **kwargs):
""" Gradient of Complete Data Log-Likelihood
Gradient w/r.t. parameters of log Pr(y_{t+1}, x_{t+1} | x_t, parameters)
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
y_next (m ndarray): y_{t+1}
parameters (Parameters): parameters
Returns:
grad_complete_data_loglike (N by p ndarray):
gradient of complete data loglikelihood for particles
            [ grad_LRinv, grad_log_mu, grad_logit_phi, grad_logit_lambduh ]
"""
N, _ = np.shape(x_next)
mu = parameters.mu
phi = parameters.phi
lambduh = parameters.lambduh
LRinv = parameters.LRinv
Rinv = parameters.Rinv
v = x_next[:,1]
grad_v = -0.5*(v-x_next[:,0]**2)/(v**2)
grad_log_mu = grad_v * (1-phi) * mu
grad_logit_phi = (grad_v * \
(-mu + lambduh*x_t[:,0]**2 + (1-lambduh)*x_t[:,1]) * (1-phi)*phi
)
grad_logit_lambduh = (grad_v * \
phi*(x_t[:,0]**2 - x_t[:,1]) * (1-lambduh)*lambduh
)
diff_y = y_next - x_next[:,0]
grad_LRinv = (LRinv**-1) - (diff_y**2) * LRinv
grad_complete_data_loglike = np.array([
grad_LRinv[0], grad_log_mu, grad_logit_phi, grad_logit_lambduh]).T
return grad_complete_data_loglike
def garch_predictive_loglikelihood(x_t, x_next, t, num_steps_ahead,
parameters, observations, prior_kernel, Ntilde=1,
**kwargs):
""" Predictive Log-Likelihood
Calculate [Pr(y_{t+1+k} | x_{t+1} for k in [0,..., num_steps_ahead]]
Uses MC so is very noisy for large k
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
num_steps_ahead (int):
parameters (Parameters): parameters
observations (T by m ndarray): y
Ntilde (int): number of MC samples
Returns:
predictive_loglikelihood (N by num_steps_ahead+1 ndarray)
"""
N = np.shape(x_next)[0]
T = np.shape(observations)[0]
predictive_loglikelihood = np.zeros((N, num_steps_ahead+1))
x_pred = x_next + 0
R = parameters.R
for k in range(num_steps_ahead+1):
if t+k >= T:
break
diff = np.ones(N)*observations[t+k] - x_pred[:,0]
y_pred_cov = R
pred_loglike = -0.5*diff**2/y_pred_cov + \
-0.5*np.log(2.0*np.pi) - 0.5*np.log(y_pred_cov)
predictive_loglikelihood[:,k] = pred_loglike
x_pred = prior_kernel.rv(x_pred)
return predictive_loglikelihood
def garch_sufficient_statistics(x_t, x_next, y_next, **kwargs):
""" GARCH Sufficient Statistics
h[0] = sum(x_{t+1})
h[1] = sum(x_{t+1}**2)
h[2] = sum(x_{t+1}**4)
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
y_next (m ndarray): y_{t+1}
Returns:
h (N by p ndarray): sufficient statistic
"""
N = np.shape(x_t)[0]
h = np.array([x_next[:,0], x_next[:,0]**2, x_next[:,0]**4]).T
return h
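def _example_pf_loglikelihood(observations, parameters, N=1000):
    """ Usage sketch (illustrative, not part of the library API): estimate
    the marginal log-likelihood of `observations` (a T by 1 ndarray) under
    `parameters` (a GARCHParameters instance) with the O(N) Poyiadjis et al.
    particle filter and the locally optimal proposal kernel.
    """
    helper = GARCHHelper(n=1, m=1)
    return helper.pf_loglikelihood_estimate(
            observations=observations,
            parameters=parameters,
            pf="poyiadjis_N",
            N=N,
            kernel="optimal",
            )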
| 15,844 | 35.425287 | 84 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/garch/parameters.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...base_parameters import (
BaseParameters, BasePrior, BasePreconditioner,
)
from ...variables import (
CovarianceParamHelper, CovariancePriorHelper,
)
from ...variables.garch_var import (
GARCHParamHelper, GARCHPriorHelper,
)
from ..._utils import var_stationary_precision
from scipy.special import logit
class GARCHParameters(BaseParameters):
""" GARCH Parameters """
_param_helper_list = [
GARCHParamHelper(),
CovarianceParamHelper(name='R', dim_names=['m']),
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "GARCHParameters:"
my_str +="\nalpha:{0}, beta:{1}, gamma:{2}, tau:{3}\n".format(
np.around(np.asscalar(self.alpha), 6),
np.around(np.asscalar(self.beta), 6),
np.around(np.asscalar(self.gamma), 6),
np.around(np.asscalar(self.tau), 6))
return my_str
@property
def tau(self):
if self.m == 1:
tau = self.LRinv ** -1
else:
tau = np.linalg.inv(self.LRinv.T)
return tau
@staticmethod
def convert_alpha_beta_gamma(alpha, beta, gamma):
""" Convert alpha, beta, gamma to log_mu, logit_phi, logit_lambduh
mu = alpha / (1- beta - gamma)
phi = beta + gamma
lambda = beta / (beta + gamma)
"""
        if alpha <= 0 or beta <= 0 or gamma <= 0:
            raise ValueError("Cannot have alpha, beta, or gamma <= 0")
        if beta + gamma >= 1:
            raise ValueError("Cannot have beta + gamma >= 1")
log_mu = np.log(alpha/(1 - beta - gamma))
logit_phi = logit(beta + gamma)
logit_lambduh = logit(beta/(beta + gamma))
return log_mu, logit_phi, logit_lambduh
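    # Worked example (illustrative): alpha=0.1, beta=0.6, gamma=0.3 gives
    # mu = 0.1/(1 - 0.9) = 1.0, phi = 0.9, lambda = 0.6/0.9 = 2/3, hence
    # log_mu = 0.0, logit_phi = logit(0.9), logit_lambduh = logit(2/3).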
class GARCHPrior(BasePrior):
""" GARCH Prior
See individual Prior Mixins for details
"""
_Parameters = GARCHParameters
_prior_helper_list = [
GARCHPriorHelper(),
CovariancePriorHelper(name='R', dim_names=['m']),
]
def generate_garch_data(T, parameters, initial_message = None,
tqdm=None):
""" Helper function for generating GARCH time series
Args:
T (int): length of series
parameters (GARCHParameters): parameters
        initial_message (dict): prior message for the initial latent state
Returns:
data (dict): dictionary containing:
observations (ndarray): T by m
latent_vars (ndarray): T by n
            sigma2s (ndarray): T latent conditional variances
            parameters (GARCHParameters)
            initial_message (dict)
"""
n = 1
m = 1
alpha = parameters.alpha
beta = parameters.beta
gamma = parameters.gamma
R = parameters.R
if initial_message is None:
init_precision = np.atleast_2d((1 - beta - gamma)/alpha)
initial_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(n),
'precision': init_precision,
}
latent_vars = np.zeros((T, n), dtype=float)
sigma2s = np.zeros((T), dtype=float)
obs_vars = np.zeros((T, m), dtype=float)
latent_prev = np.random.multivariate_normal(
mean=np.linalg.solve(initial_message['precision'],
initial_message['mean_precision']),
cov=np.linalg.inv(initial_message['precision']),
)
pbar = range(T)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("generating data")
sigma2_prev = 0
for t in pbar:
sigma2s[t] = alpha + beta*latent_prev**2 + gamma*sigma2_prev
latent_vars[t] = np.random.multivariate_normal(
mean=np.zeros(1),
cov=np.array([[sigma2s[t]]]),
)
obs_vars[t] = np.random.multivariate_normal(
mean=latent_vars[t],
cov=R,
)
latent_prev = latent_vars[t]
sigma2_prev = sigma2s[t]
data = dict(
observations=obs_vars,
latent_vars=latent_vars,
sigma2s=sigma2s,
parameters=parameters,
initial_message=initial_message,
)
return data
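def _example_generate_garch_data(T=100):
    """ Usage sketch (illustrative, not part of the library API): draw
    parameters from the default prior (the same constructor used by
    GARCHSampler.setup) and simulate a series of length T. """
    prior = GARCHPrior.generate_default_prior(n=1, m=1)
    parameters = prior.sample_prior()
    return generate_garch_data(T=T, parameters=parameters)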
| 4,397 | 29.971831 | 74 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/garch/__init__.py | from .parameters import (
GARCHParameters,
GARCHPrior,
generate_garch_data,
)
from .helper import GARCHHelper
from .kernels import GARCHPriorKernel, GARCHOptimalKernel
from .sampler import GARCHSampler, SeqGARCHSampler
| 248 | 23.9 | 57 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/svm/kernels.py | import numpy as np
from ...particle_filters.kernels import LatentGaussianKernel
# SVM Kernels:
class SVMPriorKernel(LatentGaussianKernel):
def set_parameters(self, parameters):
self.parameters = parameters
if np.abs(parameters.A) > 1:
raise ValueError("Current AR parameter is |A| = {0} > 1".format(
np.abs(parameters.A)) + "\nTry calling project_parameters?")
return
def rv(self, x_t, **kwargs):
""" Prior Kernel for SVM
Sample x_{t+1} ~ Pr(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
x_next_mean = x_t.dot(self.parameters.A.T)
x_next = np.linalg.solve(self.parameters.LQinv.T,
np.random.normal(size=x_t.shape).T).T + x_next_mean
return x_next
else:
# n = 1, x is scalar
x_next_mean = x_t * self.parameters.A
x_next = self.parameters.LQinv**-1 * np.random.normal(
size=x_t.shape) + x_next_mean
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Prior Kernel for SVM
weight_t = Pr(y_{t+1} | x_{t+1}, parameters)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# n > 1
raise NotImplementedError()
else:
# n = 1, x is scalar
diff = self.y_next
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*(diff**2)*np.exp(-x_next)*self.parameters.Rinv + \
np.log(self.parameters.LRinv) + \
-0.5*x_next
log_weights = np.reshape(log_weights, (N))
return log_weights
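# Note (added comment): the reweight step above evaluates the stochastic
# volatility measurement density
#   y_{t+1} | x_{t+1} ~ N(0, R * exp(x_{t+1})),
# whose log-density is
#   -0.5*log(2*pi) - 0.5*log(R) - 0.5*x_{t+1} - 0.5*y_{t+1}**2*exp(-x_{t+1})/R,
# matching the four terms of log_weights (for m = 1, log(LRinv) = -0.5*log(R)).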
| 2,117 | 31.090909 | 76 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/svm/sampler.py | import numpy as np
from ...sgmcmc_sampler import SGMCMCSampler, SeqSGMCMCSampler
from .parameters import SVMPrior, SVMParameters
from .helper import SVMHelper
class SVMSampler(SGMCMCSampler):
def __init__(self, n, m, observations=None, prior=None, parameters=None,
forward_message=None, name="SVMSampler", **kwargs):
self.options = kwargs
self.n = n
self.m = m
self.name = name
self.setup(
observations=observations,
prior=prior,
parameters=parameters,
forward_message=forward_message,
)
return
def setup(self, observations=None, prior=None,
parameters=None, forward_message=None):
""" Initialize the sampler
Args:
observations (ndarray): T by m ndarray of time series values
prior (SVMPrior): prior
forward_message (ndarray): prior probability for latent state
parameters (SVMParameters): initial parameters
(optional, will sample from prior by default)
"""
self.observations = observations
if prior is None:
prior = SVMPrior.generate_default_prior(n=self.n, m=self.m)
self.prior = prior
if parameters is None:
self.parameters = self.prior.sample_prior().project_parameters()
else:
if not isinstance(parameters, SVMParameters):
raise ValueError("parameters is not a SVMParameter")
self.parameters = parameters
if forward_message is None:
forward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
}
self.forward_message = forward_message
self.backward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
}
self.message_helper=SVMHelper(
n=self.n,
m=self.m,
forward_message=forward_message,
backward_message=self.backward_message,
)
return
def sample_x(self, parameters=None, observations=None, tqdm=None,
num_samples=None, **kwargs):
""" Sample X """
raise NotImplementedError()
def sample_gibbs(self, tqdm=None):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (SVMParameters): sampled parameters after one step
"""
raise NotImplementedError()
class SeqSVMSampler(SeqSGMCMCSampler, SVMSampler):
pass
| 2,745 | 32.487805 | 76 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/svm/helper.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCHelper
from .kernels import SVMPriorKernel
from ...particle_filters.buffered_smoother import (
buffered_pf_wrapper,
average_statistic,
)
from ..lgssm.helper import gaussian_sufficient_statistics
class SVMHelper(SGMCMCHelper):
""" SVM Helper
forward_message (dict) with keys
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
backward_message (dict) with keys
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
"""
def __init__(self, n, m, forward_message=None, backward_message=None,
**kwargs):
self.n = n
self.m = m
if forward_message is None:
forward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
}
self.default_forward_message=forward_message
if backward_message is None:
backward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
}
self.default_backward_message=backward_message
return
def _forward_messages(self, observations, parameters, forward_message,
weights=None, tqdm=None, **kwargs):
raise NotImplementedError('SVM does not have analytic message passing')
def _backward_messages(self, observations, parameters, backward_message,
weights=None, tqdm=None, **kwargs):
raise NotImplementedError('SVM does not have analytic message passing')
def _get_kernel(self, kernel):
if kernel is None:
kernel = "prior"
if kernel == "prior":
Kernel = SVMPriorKernel()
elif kernel == "optimal":
raise NotImplementedError("SVM optimal kernel not analytic")
else:
raise ValueError("Unrecoginized kernel = {0}".format(kernel))
return Kernel
def pf_gradient_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Filter Score Estimate
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
weights (ndarray): weights for [subsequence_start, subsequence_end)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(X_t | X_{t-1})
"optimal" - bootstrap filter P(X_t | X_{t-1}, Y_t)
**kwargs - additional keyword args for individual filters
Return:
grad (dict): grad of variables in parameters
"""
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
prior_mean = np.linalg.solve(prior_var,
forward_message['mean_precision'])
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=svm_complete_data_loglike_gradient,
statistic_dim=3,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
**kwargs
)
score_estimate = average_statistic(out)
grad = dict(
LRinv_vec = score_estimate[0],
LQinv_vec = score_estimate[1],
A = score_estimate[2],
)
return grad
def pf_loglikelihood_estimate(self, observations, parameters,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Filter Marginal Log-Likelihood Estimate
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
weights (ndarray): weights for [subsequence_start, subsequence_end)
pf (string): particle filter name
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
N (int): number of particles used by particle filter
kernel (string): kernel to use
"prior" - bootstrap filter P(X_t | X_{t-1})
"optimal" - bootstrap filter P(X_t | X_{t-1}, Y_t)
**kwargs - additional keyword args for individual filters
Return:
            loglikelihood (double): marginal log likelihood estimate
"""
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
prior_mean = np.linalg.solve(prior_var,
forward_message['mean_precision'])
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=gaussian_sufficient_statistics,
statistic_dim=3,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
**kwargs
)
loglikelihood = out['loglikelihood_estimate']
return loglikelihood
def pf_predictive_loglikelihood_estimate(self, observations, parameters,
num_steps_ahead=5,
subsequence_start=0, subsequence_end=None,
pf="filter", N=1000, kernel=None, forward_message=None,
**kwargs):
""" Particle Filter Predictive Log-Likelihood Estimate
        Returns predictive log-likelihood for k = [0, 1, ..., num_steps_ahead]
Args:
            observations (ndarray): num_obs buffered observations
parameters (Parameters): parameters
num_steps_ahead (int): number of steps
subsequence_start (int): relative start of subsequence
(0:subsequence_start are left buffer)
subsequence_end (int): relative end of subsequence
(subsequence_end: is right buffer)
N (int): number of particles used by particle filter
kernel (string): kernel to use
**kwargs - additional keyword args for individual filters
Return:
predictive_loglikelihood (num_steps_ahead + 1 ndarray)
"""
if pf != "filter":
raise ValueError("Only can use pf = 'filter' since we are filtering")
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
prior_mean = np.linalg.solve(prior_var,
forward_message['mean_precision'])
from functools import partial
additive_statistic_func = partial(svm_predictive_loglikelihood,
num_steps_ahead=num_steps_ahead,
observations=observations,
)
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=additive_statistic_func,
statistic_dim=num_steps_ahead+1,
t1=subsequence_start,
tL=subsequence_end,
prior_mean=prior_mean,
prior_var=prior_var,
logsumexp=True,
**kwargs
)
predictive_loglikelihood = out['statistics']
predictive_loglikelihood[0] = out['loglikelihood_estimate']
return predictive_loglikelihood
def pf_latent_var_distr(self, observations, parameters, lag=None,
subsequence_start=0, subsequence_end=None, weights=None,
pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
**kwargs):
if lag == 0 and pf != 'filter':
raise ValueError("pf must be filter for lag = 0")
elif lag is None and pf == 'filter':
raise ValueError("pf must not be filter for smoothing")
elif lag is not None and lag != 0:
raise NotImplementedError("lag can only be None or 0")
# Set kernel
Kernel = self._get_kernel(kernel)
# Prior Mean + Variance
if forward_message is None:
forward_message = self.default_forward_message
prior_var = np.linalg.inv(forward_message['precision'])
prior_mean = np.linalg.solve(prior_var,
forward_message['mean_precision'])
# Run buffered pf
out = buffered_pf_wrapper(pf=pf,
observations=observations,
parameters=parameters,
N=N,
kernel=Kernel,
additive_statistic_func=gaussian_sufficient_statistics,
statistic_dim=3,
t1=subsequence_start,
tL=subsequence_end,
weights=weights,
prior_mean=prior_mean,
prior_var=prior_var,
elementwise_statistic=True,
**kwargs
)
avg_statistic = average_statistic(out)
avg_statistic = np.reshape(avg_statistic, (-1, 3))
x_mean = avg_statistic[:, 0]
x_cov = avg_statistic[:, 1] - x_mean**2
x_mean = np.reshape(x_mean, (x_mean.shape[0], 1))
x_cov = np.reshape(x_cov, (x_cov.shape[0], 1, 1))
return x_mean, x_cov
# Additive Statistics
def svm_complete_data_loglike_gradient(x_t, x_next, y_next, parameters, **kwargs):
""" Gradient of Complete Data Log-Likelihood
Gradient w/r.t. parameters of log Pr(y_{t+1}, x_{t+1} | x_t, parameters)
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
y_next (m ndarray): y_{t+1}
parameters (Parameters): parameters
Returns:
grad_complete_data_loglike (N by p ndarray):
gradient of complete data loglikelihood for particles
[ grad_LRinv, grad_LQinv, grad_A ]
"""
N, n = np.shape(x_next)
m = np.shape(y_next)[0]
A = parameters.A
LQinv = parameters.LQinv
Qinv = parameters.Qinv
LRinv = parameters.LRinv
Rinv = parameters.Rinv
grad_complete_data_loglike = [None] * N
if (n != 1) or (m != 1):
LQinv_Tinv = np.linalg.inv(LQinv).T
LRinv_Tinv = np.linalg.inv(LRinv).T
for i in range(N):
grad = {}
diff = x_next[i] - np.dot(A, x_t[i])
grad['A'] = np.outer(
np.dot(Qinv, diff), x_t[i])
            grad['LQinv'] = LQinv_Tinv - np.dot(np.outer(diff, diff), LQinv)
            diff2 = y_next**2 / np.exp(x_next[i])
            grad['LRinv'] = LRinv_Tinv - np.dot(diff2, LRinv)
grad_complete_data_loglike[i] = np.concatenate([
grad['LRinv'].flatten(),
grad['LQinv'].flatten(),
grad['A'].flatten(),
])
grad_complete_data_loglike = np.array(grad_complete_data_loglike)
else:
diff_x = x_next - A * x_t
grad_A = Qinv * diff_x * x_t
grad_LQinv = (LQinv**-1) - (diff_x**2) * LQinv
diff_y2 = y_next**2/np.exp(x_next)
grad_LRinv = (LRinv**-1) - (diff_y2) * LRinv
grad_complete_data_loglike = np.hstack([
grad_LRinv, grad_LQinv, grad_A])
return grad_complete_data_loglike
def svm_predictive_loglikelihood(x_t, x_next, t, num_steps_ahead,
parameters, observations, Ntilde=1,
**kwargs):
""" Predictive Log-Likelihood
    Calculate [Pr(y_{t+1+k} | x_{t+1}) for k in [0, ..., num_steps_ahead]]
Args:
x_t (N by n ndarray): particles for x_t
x_next (N by n ndarray): particles for x_{t+1}
        num_steps_ahead (int): number of steps ahead
parameters (Parameters): parameters
observations (T by m ndarray): y
Ntilde (int): number of MC samples
Returns:
predictive_loglikelihood (N by num_steps_ahead+1 ndarray)
"""
N, n = np.shape(x_next)
T, m = np.shape(observations)
predictive_loglikelihood = np.zeros((N, num_steps_ahead+1))
x_pred_mean = x_next + 0.0
x_pred_cov = 0.0
R, Q = parameters.R, parameters.Q
for k in range(num_steps_ahead+1):
if t+k >= T:
break
diff = observations[t+k]
x_mc = (np.outer(x_pred_mean, np.ones(Ntilde)) + \
np.sqrt(x_pred_cov)*np.random.normal(size=(N,Ntilde)))
y_pred_cov = R*np.exp(x_mc)
        pred_loglike = np.mean(
                -0.5*diff**2/y_pred_cov
                - 0.5*np.log(2.0*np.pi) - 0.5*np.log(y_pred_cov),
                axis=1)
predictive_loglikelihood[:,k] = pred_loglike
x_pred_mean = parameters.A * x_pred_mean
x_pred_cov = Q + parameters.A**2 * x_pred_cov
return predictive_loglikelihood
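# The loop above propagates the scalar AR(1) predictive moments
#   E[x_{t+k+1}] = A * E[x_{t+k}],  Var[x_{t+k+1}] = Q + A**2 * Var[x_{t+k}].
# A standalone sketch of that recursion (hypothetical parameter values):
def _example_ar1_predictive_moments(x0=0.0, A=0.9, Q=0.5, num_steps=3):
    mean, cov = x0, 0.0
    moments = []
    for _ in range(num_steps):
        mean, cov = A * mean, Q + A**2 * cov
        moments.append((mean, cov))
    return moments  # [(A*x0, Q), (A**2*x0, Q + A**2*Q), ...]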
| 14,913 | 36.472362 | 82 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/svm/parameters.py | import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...base_parameters import (
BaseParameters, BasePrior, BasePreconditioner,
)
from ...variables import (
SquareMatrixParamHelper, SquareMatrixPriorHelper,
SquareMatrixPrecondHelper,
RectMatrixParamHelper, RectMatrixPriorHelper,
RectMatrixPrecondHelper,
CovarianceParamHelper, CovariancePriorHelper,
CovariancePrecondHelper,
)
from ..._utils import var_stationary_precision
class SVMParameters(BaseParameters):
""" SVM Parameters """
_param_helper_list = [
SquareMatrixParamHelper(name='A', dim_names=['n']),
CovarianceParamHelper(name='Q', dim_names=['n']),
CovarianceParamHelper(name='R', dim_names=['m']),
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "SVMParameters:"
if self.n == 1:
my_str +="\nA:{0}, Q:{1}, R:{2}\n".format(
self.A[0,0], self.Q[0,0], self.R[0,0])
else:
my_str += "\nA:\n" + str(self.A)
my_str += "\nQ:\n" + str(self.Q)
my_str += "\nR:\n" + str(self.R)
return my_str
@property
def phi(self):
phi = self.A
return phi
@property
def sigma(self):
if self.n == 1:
sigma = self.LQinv ** -1
else:
sigma = np.linalg.inv(self.LQinv.T)
return sigma
@property
def tau(self):
if self.m == 1:
tau = self.LRinv ** -1
else:
tau = np.linalg.inv(self.LRinv.T)
return tau
class SVMPrior(BasePrior):
""" SVM Prior
See individual Prior Mixins for details
"""
_Parameters = SVMParameters
_prior_helper_list = [
CovariancePriorHelper(name='Q', dim_names=['n'], matrix_name='A'),
CovariancePriorHelper(name='R', dim_names=['m']),
SquareMatrixPriorHelper(name='A', dim_names=['n'],
var_row_name='Q'),
]
def generate_svm_data(T, parameters, initial_message = None,
tqdm=None):
""" Helper function for generating SVM time series
Args:
T (int): length of series
parameters (SVMParameters): parameters
        initial_message (dict): prior message for x_{-1}
Returns:
data (dict): dictionary containing:
observations (ndarray): T by m
latent_vars (ndarray): T by n
parameters (SVMParameters)
            initial_message (dict)
"""
n, _ = np.shape(parameters.A)
m, _ = np.shape(parameters.R)
A = parameters.A
Q = parameters.Q
R = parameters.R
if initial_message is None:
init_precision = var_stationary_precision(
parameters.Qinv, parameters.A, 10)
initial_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(n),
'precision': init_precision,
}
latent_vars = np.zeros((T, n), dtype=float)
obs_vars = np.zeros((T, m), dtype=float)
latent_prev = np.random.multivariate_normal(
mean=np.linalg.solve(initial_message['precision'],
initial_message['mean_precision']),
cov=np.linalg.inv(initial_message['precision']),
)
pbar = range(T)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("generating data")
for t in pbar:
latent_vars[t] = np.random.multivariate_normal(
mean=np.dot(A, latent_prev),
cov=Q,
)
obs_vars[t] = np.random.multivariate_normal(
            mean=np.zeros(m),
cov=np.exp(latent_vars[t])*R,
)
latent_prev = latent_vars[t]
data = dict(
observations=obs_vars,
latent_vars=latent_vars,
parameters=parameters,
initial_message=initial_message,
)
return data
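# Example usage of generate_svm_data (a sketch; assumes BaseParameters accepts
# the A, Q, R kwargs handled by the param helpers above, and that the module
# is run as part of the package so the relative imports resolve):
if __name__ == "__main__":
    demo_params = SVMParameters(
            A=np.array([[0.9]]), Q=np.array([[0.5]]), R=np.array([[1.0]]))
    demo_data = generate_svm_data(T=100, parameters=demo_params)
    print(demo_params)
    print(demo_data['observations'].shape)  # (100, 1)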
| 4,148 | 29.065217 | 78 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/models/svm/__init__.py | from .parameters import (
SVMParameters,
SVMPrior,
generate_svm_data,
)
from .helper import SVMHelper
from .kernels import SVMPriorKernel
from .sampler import SVMSampler, SeqSVMSampler
| 214 | 20.5 | 46 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/variables/covariance.py | import numpy as np
import scipy.stats
import logging
# BaseParameters/BasePrior are needed by the __main__ demo at the end of
# this module
from ..base_parameters import (
    BaseParameters, BasePrior,
    ParamHelper, PriorHelper, PrecondHelper,
    get_value_func, get_hyperparam_func, get_dim_func,
    set_value_func, set_hyperparam_func,
)
from .._utils import (
array_wishart_rvs,
pos_def_mat_inv,
tril_vector_to_mat,
)
logger = logging.getLogger(name=__name__)
# Implementations of Covariance Parameters
# Single Covariance
class CovarianceParamHelper(ParamHelper):
def __init__(self, name='Q', dim_names=None):
self.name = name
self._lt_vec_name = 'L{}inv_vec'.format(name)
self._lt_prec_name = 'L{}inv'.format(name)
self._inv_name = '{}inv'.format(name)
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
# Handle kwargs input
if self._lt_vec_name in kwargs:
# LQinv_vec
LQinv_vec = np.array(kwargs[self._lt_vec_name]).astype(float)
n = int(np.sqrt(len(LQinv_vec)*2))
param.var_dict[self._lt_vec_name] = LQinv_vec
param._set_check_dim(**{self.dim_names[0]: n})
elif self._lt_prec_name in kwargs:
# LQinv
n, n2 = np.shape(kwargs[self._lt_prec_name])
if n != n2:
raise ValueError("{} must be square matrix".format(
self._lt_prec_name))
LQinv = np.array(kwargs[self._lt_prec_name]).astype(float)
LQinv_vec = LQinv[np.tril_indices_from(LQinv)]
param.var_dict[self._lt_vec_name] = LQinv_vec
param._set_check_dim(**{self.dim_names[0]: n})
elif self.name in kwargs:
# Q
n, n2 = np.shape(kwargs[self.name])
if n != n2:
raise ValueError("{} must be square matrix".format(
self.name))
LQinv = np.linalg.cholesky(np.linalg.inv(
np.array(kwargs[self.name]).astype(float)
))
LQinv_vec = LQinv[np.tril_indices_from(LQinv)]
param.var_dict[self._lt_vec_name] = LQinv_vec
param._set_check_dim(**{self.dim_names[0]: n})
else:
raise ValueError("{} not provided".format(self._lt_prec_name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
setattr(param, self._lt_vec_name, name_kwargs['fixed'].copy())
if name_kwargs.get('thresh', True):
LQinv = getattr(param, self._lt_prec_name)
LQinv[np.triu_indices_from(LQinv, 1)] = 0
if np.any(np.diag(LQinv) < 0.0):
logger.info(
"Reflecting {0}: {1} < 0.0".format(
self._lt_prec_name, LQinv)
)
LQinv[:] = np.linalg.cholesky(
np.dot(LQinv, LQinv.T) + \
np.eye(param.dim[self.dim_names[0]])*1e-16
)
setattr(param, self._lt_prec_name, LQinv)
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
LQinv_vec = var_dict[self._lt_vec_name]
if np.isscalar(LQinv_vec):
vector_list.append([LQinv_vec])
else:
vector_list.append(LQinv_vec)
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
        LQinv_vec = vector[vector_index:vector_index+(n+1)*n//2]
        var_dict[self._lt_vec_name] = LQinv_vec
return vector_index+(n+1)*n//2
def get_properties(self):
properties = {}
properties[self._lt_vec_name] = property(
fget=get_value_func(self._lt_vec_name),
fset=set_value_func(self._lt_vec_name),
doc="{0} is a ({1}+1){1}/2 vector of lower tri matrix".format(
self._lt_vec_name, self.dim_names[0]),
)
properties[self._lt_prec_name] = property(
fget=get_LQinv_func(self.name),
fset=set_LQinv_func(self.name),
doc="{0} is a {1} by {1} lower triangular matrix".format(
self._lt_prec_name, self.dim_names[0]),
)
properties[self._inv_name] = property(
fget=get_Qinv_func(self.name),
doc="{0} is a {1} by {1} precision matrix".format(
self._inv_name, self.dim_names[0]),
)
properties[self.name] = property(
fget=get_Q_func(self.name),
doc="{0} is a {1} by {1} covariance matrix".format(
self.name, self.dim_names[0]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
def get_LQinv_func(name):
def fget(self):
LQinv = tril_vector_to_mat(getattr(self, "L{0}inv_vec".format(name)))
return LQinv
return fget
def set_LQinv_func(name):
def fset(self, value):
LQinv_vec = value[np.tril_indices_from(value)]
self.var_dict["L{0}inv_vec".format(name)] = LQinv_vec
return
return fset
def get_Qinv_func(name):
def fget(self):
LQinv = getattr(self, "L{0}inv".format(name))
Qinv = LQinv.dot(LQinv.T) + 1e-16*np.eye(LQinv.shape[0])
return Qinv
return fget
def get_Q_func(name):
def fget(self):
Qinv = getattr(self, "{0}inv".format(name))
if np.size(Qinv) == 1:
Q = Qinv**-1
else:
Q = pos_def_mat_inv(Qinv)
return Q
return fget
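# The accessors above store a covariance Q as the n*(n+1)//2 vector of
# lower-triangular entries of LQinv = chol(inv(Q)). A standalone sketch of
# that pack/unpack round trip (numpy only; mirrors tril_vector_to_mat):
def _example_tril_round_trip(Q=None):
    Q = np.eye(2) if Q is None else Q
    LQinv = np.linalg.cholesky(np.linalg.inv(Q))
    vec = LQinv[np.tril_indices_from(LQinv)]  # pack: length 3 when n = 2
    n = int(np.sqrt(len(vec) * 2))            # invert n*(n+1)//2
    mat = np.zeros((n, n))
    mat[np.tril_indices(n)] = vec             # unpack
    return vec, mat                           # mat == LQinv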
class CovariancePriorHelper(PriorHelper):
def __init__(self, name='Q', dim_names=None, matrix_name=None):
self.name = name
self._scale_name = 'scale_{0}inv'.format(name)
self._df_name = 'df_{0}inv'.format(name)
self._inv_name = '{0}inv'.format(name)
self._lt_prec_name = 'L{0}inv'.format(name)
self._lt_vec_name = 'L{0}inv_vec'.format(name)
self.dim_names = ['n'] if dim_names is None else dim_names
self.matrix_name = matrix_name
return
def set_hyperparams(self, prior, **kwargs):
if self._scale_name in kwargs:
n, n2 = np.shape(kwargs[self._scale_name])
else:
raise ValueError("{} must be provided".format(self._scale_name))
if self._df_name not in kwargs:
raise ValueError("{} must be provided".format(self._df_name))
if n != n2:
raise ValueError("{} must be square".format(self._scale_name))
prior._set_check_dim(**{self.dim_names[0]: n})
prior.hyperparams[self._scale_name] = kwargs[self._scale_name]
prior.hyperparams[self._df_name] = kwargs[self._df_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
Qinv = array_wishart_rvs(df=df_Qinv, scale=scale_Qinv)
LQinv = np.linalg.cholesky(Qinv)
var_dict[self._lt_vec_name] = LQinv[np.tril_indices_from(LQinv)]
return
def _get_matrix_hyperparam(self, prior):
if self.matrix_name is not None:
mean = prior.hyperparams['mean_{0}'.format(self.matrix_name)]
mean_prec = (mean *
prior.hyperparams['var_col_{0}'.format(self.matrix_name)]**-1)
prec = np.diag(
prior.hyperparams['var_col_{0}'.format(self.matrix_name)]**-1)
else:
raise RuntimeError("matrix_name not specified for {0}".format(
self.name))
return mean, mean_prec, prec
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean, mean_prec, prec = self._get_matrix_hyperparam(prior)
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
if len(np.shape(prec)) == 1:
S_prevprev = \
prec + sufficient_stat[self.name]['S_prevprev']
S_curprev = \
mean_prec + sufficient_stat[self.name]['S_curprev']
S_curcur = np.outer(mean, mean_prec) + \
sufficient_stat[self.name]['S_curcur']
S_schur = S_curcur - np.outer(S_curprev, S_curprev)/S_prevprev
df_Q = df_Qinv + sufficient_stat[self.name]['S_count']
scale_Qinv = \
np.linalg.inv(np.linalg.inv(scale_Qinv) + S_schur)
Qinv = array_wishart_rvs(df=df_Q, scale=scale_Qinv)
else:
S_prevprev = \
prec + sufficient_stat[self.name]['S_prevprev']
S_curprev = \
mean_prec + sufficient_stat[self.name]['S_curprev']
S_curcur = np.matmul(mean, mean_prec.T) + \
sufficient_stat[self.name]['S_curcur']
S_schur = S_curcur - np.matmul(S_curprev,
np.linalg.solve(S_prevprev, S_curprev.T))
df_Q = df_Qinv + sufficient_stat[self.name]['S_count']
scale_Qinv = \
np.linalg.inv(np.linalg.inv(scale_Qinv) + S_schur)
Qinv = array_wishart_rvs(df=df_Q, scale=scale_Qinv)
LQinv = np.linalg.cholesky(Qinv)
var_dict[self._lt_vec_name] = LQinv[np.tril_indices_from(LQinv)]
return
def logprior(self, prior, logprior, parameters, **kwargs):
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
logprior += scipy.stats.wishart.logpdf(
getattr(parameters, self._inv_name),
df=df_Qinv, scale=scale_Qinv)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
LQinv = getattr(parameters, self._lt_prec_name)
grad_LQinv = \
(df_Qinv - LQinv.shape[0] - 1) * np.linalg.inv(LQinv.T) - \
np.linalg.solve(scale_Qinv, LQinv)
grad[self._lt_vec_name] = grad_LQinv[np.tril_indices_from(grad_LQinv)]
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
if kwargs.get('from_mean', False):
Qinv = getattr(parameters, self._inv_name)
else:
            Qinv = np.eye(getattr(parameters, self._lt_prec_name).shape[0])
df_Qinv = np.shape(Qinv)[-1] + 1.0 + var**-1
scale_Qinv = Qinv/df_Qinv
prior_kwargs[self._scale_name] = scale_Qinv
prior_kwargs[self._df_name] = df_Qinv
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
var = kwargs['var']
Qinv = np.eye(n)
df_Qinv = np.shape(Qinv)[-1] + 1.0 + var**-1
scale_Qinv = Qinv/df_Qinv
default_kwargs[self._scale_name] = scale_Qinv
default_kwargs[self._df_name] = df_Qinv
return
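# sample_posterior above is the standard conjugate Wishart update: with
# S_schur the Schur complement of the regression sufficient statistics,
#   df' = df + S_count,  scale'^{-1} = scale^{-1} + S_schur.
# A standalone sketch of that update (hypothetical values):
def _example_wishart_posterior(df=3.0, scale=None, S_schur=None, count=10):
    scale = np.eye(2) if scale is None else scale
    S_schur = 0.8 * np.eye(2) if S_schur is None else S_schur
    df_post = df + count
    scale_post = np.linalg.inv(np.linalg.inv(scale) + S_schur)
    return df_post, scale_post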
class CovariancePrecondHelper(PrecondHelper):
def __init__(self, name='Q', dim_names=None):
self.name = name
self._inv_name = '{0}inv'.format(name)
self._lt_vec_name = 'L{0}inv_vec'.format(name)
self.dim_names = ['n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Qinv = getattr(parameters, self._inv_name)
LQinv_grad = np.zeros(Qinv.shape)
LQinv_grad[np.tril_indices_from(LQinv_grad)] = grad[self._lt_vec_name]
precond_LQinv = np.dot(0.5*Qinv, LQinv_grad)
precond_grad[self._lt_vec_name] = \
precond_LQinv[np.tril_indices_from(precond_LQinv)]
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = tril_vector_to_mat(getattr(parameters, self._lt_vec_name))
LQinv_noise = np.dot(np.sqrt(0.5)*LQinv,
np.random.normal(loc=0, size=LQinv.shape)
)
noise[self._lt_vec_name] = LQinv_noise[np.tril_indices_from(LQinv_noise)]
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
LQinv_vec = getattr(parameters, self._lt_vec_name)
n = int(np.sqrt(len(LQinv_vec)*2))
correction[self._lt_vec_name] = 0.5 * (n+1) * LQinv_vec
return
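# Together the three hooks above implement preconditioned SGLD for
# LQinv_vec: under one common convention,
#   theta <- theta + eps * (precond_grad + correction) + sqrt(2*eps) * noise,
# where `noise` already carries the preconditioner's covariance (here built
# from 0.5*Qinv). A sketch of one step given the flattened pieces
# (hypothetical step size and plumbing):
def _example_psgld_step(theta_vec, precond_grad_vec, correction_vec,
                        noise_vec, eps=1e-3):
    return (theta_vec
            + eps * (precond_grad_vec + correction_vec)
            + np.sqrt(2.0 * eps) * noise_vec)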
# Multiple Covariance
class CovariancesParamHelper(ParamHelper):
def __init__(self, name='Q', dim_names=None):
self.name = name
self._lt_prec_name = 'L{}inv'.format(name)
self._lt_vec_name = 'L{}inv_vec'.format(name)
self._inv_name = '{}inv'.format(name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
# Handle kwargs input
if self._lt_vec_name in kwargs:
# LQinv_vec
LQinv_vec = np.array(kwargs[self._lt_vec_name]).astype(float)
num_states, L = np.shape(LQinv_vec)
n = int(np.sqrt(L*2))
param.var_dict[self._lt_vec_name] = LQinv_vec
param._set_check_dim(**{
self.dim_names[0]: n,
self.dim_names[1]: num_states,
})
elif self._lt_prec_name in kwargs:
# LQinv
num_states, n, n2 = np.shape(kwargs[self._lt_prec_name])
if n != n2:
raise ValueError("{} must be square matrix".format(
self._lt_prec_name))
LQinv = np.array(kwargs[self._lt_prec_name]).astype(float)
setattr(param, self._lt_prec_name, LQinv)
param._set_check_dim(**{
self.dim_names[0]: n,
self.dim_names[1]: num_states,
})
elif self.name in kwargs:
# Q
num_states, n, n2 = np.shape(kwargs[self.name])
if n != n2:
raise ValueError("{} must be square matrix".format(
self.name))
Q = np.array(kwargs[self.name]).astype(float)
LQinv = np.array([
np.linalg.cholesky(np.linalg.inv(Q_k)) for Q_k in Q
])
setattr(param, self._lt_prec_name, LQinv)
param._set_check_dim(**{
self.dim_names[0]: n,
self.dim_names[1]: num_states,
})
else:
raise ValueError("{} not provided".format(self._lt_prec_name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
setattr(param, self._lt_prec_name, name_kwargs['fixed'].copy())
if name_kwargs.get('thresh', True):
LQinv = getattr(param, self._lt_prec_name)
for k, LQinv_k in enumerate(LQinv):
if np.any(np.diag(LQinv_k) < 0.0):
logger.info(
"Reflecting {0}[{2}]: {1} < 0.0".format(
self._lt_prec_name, LQinv_k, k)
)
LQinv_k[:] = np.linalg.cholesky(
np.dot(LQinv_k, LQinv_k.T) + \
np.eye(param.dim[self.dim_names[0]])*1e-16
)
setattr(param, self._lt_prec_name, LQinv)
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
LQinv_vec = var_dict[self._lt_vec_name]
vector_list.extend([LQinv_vec_k for LQinv_vec_k in LQinv_vec])
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
LQinv_vec = np.zeros((num_states, (n+1)*n//2))
for k in range(num_states):
LQinv_vec[k] = vector[vector_index + k*(n+1)*n//2:
vector_index + (k+1)*(n+1)*n//2]
var_dict[self._lt_vec_name] = LQinv_vec
return vector_index+num_states*(n+1)*n//2
def get_properties(self):
properties = {}
properties[self._lt_vec_name] = property(
fget=get_value_func(self._lt_vec_name),
fset=set_value_func(self._lt_vec_name),
doc="{0} is {2} of ({1}+1)*{1}/2 lower tri mat vectors".format(
self._lt_vec_name, self.dim_names[0], self.dim_names[1]),
)
properties[self._lt_prec_name] = property(
fget=get_LQinvs_func(self.name),
fset=set_LQinvs_func(self.name),
doc="{0} is {2} of {1} by {1} lower triangular matrices".format(
self._lt_prec_name, self.dim_names[0], self.dim_names[1]),
)
properties[self._inv_name] = property(
fget=get_Qinvs_func(self.name),
doc="{0} is {2} of {1} by {1} precision matrices".format(
self._inv_name, self.dim_names[0], self.dim_names[1]),
)
properties[self.name] = property(
fget=get_Qs_func(self.name),
doc="{0} is {2} of {1} by {1} covariance matrices".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
def get_LQinvs_func(name):
def fget(self):
LQinv_vec = getattr(self, "L{0}inv_vec".format(name))
LQinv = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in LQinv_vec])
return LQinv
return fget
def set_LQinvs_func(name):
def fset(self, value):
LQinv_vec = np.array([
LQinv_k[np.tril_indices_from(LQinv_k)]
for LQinv_k in value
])
self.var_dict["L{0}inv_vec".format(name)] = LQinv_vec
return
return fset
def get_Qinvs_func(name):
def fget(self):
LQinv = getattr(self, "L{0}inv".format(name))
Qinv = np.array([LQinv_k.dot(LQinv_k.T) + 1e-16*np.eye(LQinv_k.shape[0])
for LQinv_k in LQinv])
return Qinv
return fget
def get_Qs_func(name):
def fget(self):
Qinv = getattr(self, "{0}inv".format(name))
if Qinv.shape[1] == 1:
Q = np.array([Qinv_k**-1 for Qinv_k in Qinv])
else:
Q = np.array([pos_def_mat_inv(Qinv_k) for Qinv_k in Qinv])
return Q
return fget
class CovariancesPriorHelper(PriorHelper):
def __init__(self, name='Q', dim_names=None, matrix_name=None):
self.name = name
self._scale_name = 'scale_{0}inv'.format(name)
self._df_name = 'df_{0}inv'.format(name)
self._inv_name = '{0}inv'.format(name)
self._lt_vec_name = 'L{0}inv_vec'.format(name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
self.matrix_name = matrix_name
return
def set_hyperparams(self, prior, **kwargs):
if self._scale_name in kwargs:
num_states, n, n2 = np.shape(kwargs[self._scale_name])
else:
raise ValueError("{} must be provided".format(self._scale_name))
if self._df_name in kwargs:
num_states2 = np.shape(kwargs[self._df_name])[0]
else:
raise ValueError("{} must be provided".format(self._df_name))
if n != n2:
raise ValueError("{} must be square".format(self._scale_name))
if num_states != num_states2:
raise ValueError("scale and df for {} don't match".format(self.name))
prior._set_check_dim(**{
self.dim_names[0]: n,
self.dim_names[1]: num_states,
})
prior.hyperparams[self._scale_name] = kwargs[self._scale_name]
prior.hyperparams[self._df_name] = kwargs[self._df_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
Qinvs = [array_wishart_rvs(df=df_Qinv_k, scale=scale_Qinv_k)
for df_Qinv_k, scale_Qinv_k in zip(df_Qinv, scale_Qinv)]
LQinv_vec = np.array([
np.linalg.cholesky(Qinv_k)[np.tril_indices_from(Qinv_k)]
for Qinv_k in Qinvs])
var_dict[self._lt_vec_name] = LQinv_vec
return
def _get_matrix_hyperparam(self, prior):
if self.matrix_name is not None:
mean = prior.hyperparams['mean_{0}'.format(self.matrix_name)]
prec = prior.hyperparams['var_col_{0}'.format(self.matrix_name)]**-1
else:
raise RuntimeError("matrix_name not specified for {0}".format(
self.name))
return mean, prec
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean, prec = self._get_matrix_hyperparam(prior)
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
n = prior.dim[self.dim_names[0]]
num_states = prior.dim[self.dim_names[1]]
Qinvs = [None for _ in range(num_states)]
if len(np.shape(prec)) == 1:
for k in range(num_states):
S_prevprev = prec[k] + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = prec[k] * mean[k] + \
sufficient_stat[self.name]['S_curprev'][k]
S_curcur = np.outer(mean[k], prec[k]*mean[k]) + \
sufficient_stat[self.name]['S_curcur'][k]
S_schur = S_curcur - np.outer(S_curprev, S_curprev)/S_prevprev
df_Q_k = df_Qinv[k] + sufficient_stat[self.name]['S_count'][k]
scale_Qinv_k = \
np.linalg.inv(np.linalg.inv(scale_Qinv[k]) + S_schur)
Qinvs[k] = array_wishart_rvs(df=df_Q_k, scale=scale_Qinv_k)
else:
for k in range(num_states):
S_prevprev = np.diag(prec[k]) + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = prec[k] * mean[k] + \
sufficient_stat[self.name]['S_curprev'][k]
S_curcur = np.matmul(mean[k], (prec[k] * mean[k]).T) + \
sufficient_stat[self.name]['S_curcur'][k]
S_schur = S_curcur - np.matmul(S_curprev,
np.linalg.solve(S_prevprev, S_curprev.T))
df_Q_k = df_Qinv[k] + sufficient_stat[self.name]['S_count'][k]
scale_Qinv_k = \
np.linalg.inv(np.linalg.inv(scale_Qinv[k]) + S_schur)
Qinvs[k] = array_wishart_rvs(df=df_Q_k, scale=scale_Qinv_k)
LQinv_vec = np.array([
np.linalg.cholesky(Qinv_k)[np.tril_indices_from(Qinv_k)]
for Qinv_k in Qinvs])
var_dict[self._lt_vec_name] = LQinv_vec
return
def logprior(self, prior, logprior, parameters, **kwargs):
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
Qinv = getattr(parameters, self._inv_name)
for Qinv_k, df_Qinv_k, scale_Qinv_k in zip(
Qinv, df_Qinv, scale_Qinv):
logprior += scipy.stats.wishart.logpdf(Qinv_k,
df=df_Qinv_k, scale=scale_Qinv_k)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
scale_Qinv = prior.hyperparams[self._scale_name]
df_Qinv = prior.hyperparams[self._df_name]
LQinv = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in getattr(parameters, self._lt_vec_name)])
grad_LQinv = np.array([
(df_Qinv_k - LQinv.shape[1] - 1) * np.linalg.inv(LQinv_k.T) - \
np.linalg.solve(scale_Qinv_k, LQinv_k)
for LQinv_k, df_Qinv_k, scale_Qinv_k in zip(
LQinv, df_Qinv, scale_Qinv)
])
grad_LQinv_vec = np.array([
grad_LQinv_k[np.tril_indices_from(grad_LQinv_k)]
for grad_LQinv_k in grad_LQinv])
grad[self._lt_vec_name] = grad_LQinv_vec
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
Qinv = getattr(parameters, self._inv_name)
if not kwargs.get('from_mean', False):
Qinv = np.array([np.eye(Qinv.shape[1])
for _ in range(Qinv.shape[0])])
df_Qinv = np.shape(Qinv)[-1] + 1.0 + var**-1
scale_Qinv = Qinv/df_Qinv
df_Qinv = np.array([df_Qinv+0 for k in range(Qinv.shape[0])])
prior_kwargs[self._scale_name] = scale_Qinv
prior_kwargs[self._df_name] = df_Qinv
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
var = kwargs['var']
Qinv = np.array([np.eye(n) for _ in range(num_states)])
df_Qinv = np.shape(Qinv)[-1] + 1.0 + var**-1
scale_Qinv = Qinv/df_Qinv
df_Qinv = np.array([df_Qinv+0 for k in range(Qinv.shape[0])])
default_kwargs[self._scale_name] = scale_Qinv
default_kwargs[self._df_name] = df_Qinv
return
class CovariancesPrecondHelper(PrecondHelper):
def __init__(self, name='Q', dim_names=None):
self.name = name
self._inv_name = '{0}inv'.format(name)
self._lt_vec_name = 'L{0}inv_vec'.format(name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Qinv = getattr(parameters, self._inv_name)
num_states, n, _ = Qinv.shape
precond_LQinv_vec = np.zeros((num_states, (n+1)*n//2))
for k in range(num_states):
LQinv_grad_k = np.zeros((n,n))
LQinv_grad_k[np.tril_indices(n)] = grad[self._lt_vec_name][k]
precond_LQinv_k = np.dot(0.5*Qinv[k], LQinv_grad_k)
precond_LQinv_vec[k] = precond_LQinv_k[np.tril_indices(n)]
precond_grad[self._lt_vec_name] = precond_LQinv_vec
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in getattr(parameters, self._lt_vec_name)])
num_states, n, _ = LQinv.shape
LQinv_noise = np.array([
np.dot(np.sqrt(0.5)*LQinv[k],
np.random.normal(loc=0, size=(n, n))
)
for k in range(num_states)
])
noise[self._lt_vec_name] = np.array([LQinv_noise_k[np.tril_indices(n)]
for LQinv_noise_k in LQinv_noise])
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
LQinv_vec = getattr(parameters, self._lt_vec_name)
n = int(np.sqrt(np.shape(LQinv_vec)[1]*2))
correction[self._lt_vec_name] = 0.5 * (n+1) * LQinv_vec
return
# Single Shared Covariance
### TODO
if __name__ == "__main__":
# Demo of Parameters
class CovParameters(BaseParameters):
""" Cov Parameters """
_param_helper_list = [
CovarianceParamHelper(name='Q', dim_names=['n'])
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "CovParameters:"
my_str += "\nQ:\n" + str(self.Q)
return my_str
class CovPrior(BasePrior):
""" Cov Prior """
_Parameters = CovParameters
_prior_helper_list = [
CovariancePriorHelper(name='Q', dim_names=['n'])
]
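    # Smoke test (a sketch; assumes BaseParameters.__init__ accepts the
    # variable kwargs handled by its param helpers):
    demo_param = CovParameters(Q=np.eye(2))
    print(demo_param)
    print(demo_param.LQinv_vec)  # length n*(n+1)//2 == 3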
| 28,228 | 39.384835 | 82 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/variables/matrices.py | import numpy as np
import scipy.stats
from ..base_parameters import (
ParamHelper, PriorHelper, PrecondHelper,
get_value_func, get_hyperparam_func, get_dim_func,
set_value_func, set_hyperparam_func,
)
from .._utils import (
normal_logpdf,
matrix_normal_logpdf,
pos_def_mat_inv,
varp_stability_projection,
tril_vector_to_mat,
)
import logging
logger = logging.getLogger(name=__name__)
## Implementations of Vector, Square, Rectangular Parameters
# Single Square
class VectorParamHelper(ParamHelper):
def __init__(self, name='mu', dim_names=None):
self.name = name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
            if np.ndim(kwargs[self.name]) != 1:
                raise ValueError("{} must be vector".format(self.name))
            n, = np.shape(kwargs[self.name])
            param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
            param._set_check_dim(**{self.dim_names[0]: n})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
mu = np.reshape(vector[vector_index:vector_index+n], (n))
var_dict[self.name] = mu
return vector_index+n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {1} vector".format(
self.name, self.dim_names[0]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
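# from_dict_to_vector / from_vector_to_dict above implement the flatten /
# unflatten round trip used when parameters are packed into a single vector.
# A standalone sketch of that round trip (hypothetical values):
def _example_vector_round_trip():
    helper = VectorParamHelper(name='mu', dim_names=['n'])
    var_dict = {'mu': np.array([1.0, 2.0, 3.0])}
    pieces = []
    helper.from_dict_to_vector(pieces, var_dict)
    vector = np.concatenate(pieces)
    out = {}
    next_index = helper.from_vector_to_dict(out, vector, 0, n=3)
    return out['mu'], next_index  # (array([1., 2., 3.]), 3)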
class VectorPriorHelper(PriorHelper):
def __init__(self, name='mu', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name)
self._var_col_name = 'var_col_{0}'.format(name)
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
            n, = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
if not np.isscalar(kwargs[self._var_col_name]):
raise ValueError("{} must be scalar".format(self._var_col_name))
else:
raise ValueError("{} must be provided".format(self._var_col_name))
prior._set_check_dim(**{self.dim_names[0]: n})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
var_dict[self.name] = np.random.multivariate_normal(
mean=mean_mu,
cov=var_col_mu*pos_def_mat_inv(Qinv),
)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
S_prevprev = var_col_mu**-1 + \
sufficient_stat[self.name]['S_prevprev']
S_curprev = mean_mu * var_col_mu**-1 + \
sufficient_stat[self.name]['S_curprev']
post_mean_mu = S_curprev/S_prevprev
var_dict[self.name] = np.random.multivariate_normal(
mean=post_mean_mu,
cov=pos_def_mat_inv(Qinv)/S_prevprev,
)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
else:
LQinv = np.eye(prior.dim[self.dim_names[0]])
logprior += normal_logpdf(parameters.var_dict[self.name],
mean=mean_mu,
                Lprec=var_col_mu**-0.5 * LQinv,
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
mu = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
grad[self.name] = -1.0 * np.dot(var_col_mu**-1 * Qinv, mu - mean_mu)
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
mu = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_mu = mu.copy()
else:
mean_mu = np.zeros_like(mu)
var_col_mu = var
prior_kwargs[self._mean_name] = mean_mu
prior_kwargs[self._var_col_name] = var_col_mu
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
var = kwargs['var']
mean_mu = np.zeros((n))
var_col_mu = var
default_kwargs[self._mean_name] = mean_mu
default_kwargs[self._var_col_name] = var_col_mu
return
class VectorPrecondHelper(PrecondHelper):
def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.dot(Q, grad[self.name])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.linalg.solve(LQinv.T,
np.random.normal(loc=0, size=(LQinv.shape[0]))
)
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
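# grad_logprior in VectorPriorHelper is the gradient of a
# N(mean_mu, var_col_mu * Q) log-density in mu. A standalone
# finite-difference check of that formula (hypothetical values):
def _check_vector_prior_grad(var_col=2.0, eps=1e-6):
    mu = np.array([0.3, -0.1])
    mean = np.zeros(2)
    Q = np.array([[1.0, 0.2], [0.2, 1.5]])
    analytic = -np.dot(np.linalg.inv(Q), mu - mean) / var_col
    f = lambda m: scipy.stats.multivariate_normal.logpdf(m, mean, var_col * Q)
    numeric = np.array([
        (f(mu + eps * e) - f(mu - eps * e)) / (2 * eps)
        for e in np.eye(2)])
    return analytic, numeric  # should agree to ~1e-6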
# Multiple Square
class VectorsParamHelper(ParamHelper):
def __init__(self, name='mu', dim_names=None):
self.name = name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
mu = np.reshape(vector[vector_index:vector_index+num_states*n],
(num_states, n))
var_dict[self.name] = mu
return vector_index+num_states*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is {2} {1} vectors".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class VectorsPriorHelper(PriorHelper):
def __init__(self, name='mu', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # num_states by n
        self._var_col_name = 'var_col_{0}'.format(name) # num_states
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2 = np.size(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                for _ in range(prior.dim[self.dim_names[1]])])
mus = [None for k in range(prior.dim[self.dim_names[1]])]
for k in range(len(mus)):
mus[k] = np.random.multivariate_normal(
mean=mean_mu[k],
cov=var_col_mu[k]*pos_def_mat_inv(Qinvs[k]),
)
var_dict[self.name] = np.array(mus)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(n) for _ in range(num_states)])
mus = [None for k in range(num_states)]
for k in range(len(mus)):
S_prevprev = var_col_mu[k]**-1 + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_mu[k] * var_col_mu[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
post_mean_mu_k = S_curprev/S_prevprev
mus[k] = np.random.multivariate_normal(
mean=post_mean_mu_k,
cov=pos_def_mat_inv(Qinvs[k])/S_prevprev,
)
var_dict[self.name] = np.array(mus)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
else:
LQinvs = np.array([np.eye(n)
for _ in range(num_states)])
for mu_k, mean_mu_k, var_col_mu_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_mu, var_col_mu, LQinvs):
logprior += normal_logpdf(mu_k,
mean=mean_mu_k,
Lprec=var_col_mu_k**-0.5 * LQinv_k,
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mu = parameters.var_dict[self.name]
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinvs = np.array([np.eye(n)
for _ in range(num_states)])
grad[self.name] = np.array([
-1.0 * np.dot(var_col_mu[k]**-1 * Qinvs[k], mu[k] - mean_mu[k])
for k in range(num_states)])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
mu = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_mu = mu.copy()
else:
mean_mu = np.zeros_like(mu)
var_col_mu = np.array([
            var for _ in range(mu.shape[0])
])
prior_kwargs[self._mean_name] = mean_mu
prior_kwargs[self._var_col_name] = var_col_mu
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
var = kwargs['var']
mean_mu = np.zeros((num_states, n))
var_col_mu = np.ones((num_states))*var
default_kwargs[self._mean_name] = mean_mu
default_kwargs[self._var_col_name] = var_col_mu
return
class VectorsPrecondHelper(PrecondHelper):
def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.array([
np.dot(Q[k], grad[self.name][k])
for k in range(Q.shape[0])
])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.array([
np.linalg.solve(LQinv[k].T,
np.random.normal(loc=0, size=LQinv.shape[-1])
)
for k in range(LQinv.shape[0])
])
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Single Square
class SquareMatrixParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
n, n2 = np.shape(kwargs[self.name])
if n != n2:
raise ValueError("{} must be square matrices".format(self.name))
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', True):
A = param.var_dict[self.name]
A = varp_stability_projection(A,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
A = np.reshape(vector[vector_index:vector_index+n**2], (n, n))
var_dict[self.name] = A
return vector_index+n**2
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {1} by {1} matrix".format(
self.name, self.dim_names[0]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class SquareMatrixPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name)
self._var_col_name = 'var_col_{0}'.format(name)
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
n, n2 = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
n3 = np.size(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if n != n2:
raise ValueError("{} must be square".format(self._mean_name))
if n != n3:
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{self.dim_names[0]: n})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
var_dict[self.name] = scipy.stats.matrix_normal(
mean=mean_A,
rowcov=pos_def_mat_inv(Qinv),
colcov=np.diag(var_col_A),
).rvs()
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
S_prevprev = np.diag(var_col_A**-1) + \
sufficient_stat[self.name]['S_prevprev']
S_curprev = mean_A * var_col_A**-1 + \
sufficient_stat[self.name]['S_curprev']
var_dict[self.name] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinv),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
else:
LQinv = np.eye(prior.dim[self.dim_names[0]])
logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
mean=mean_A,
Lrowprec=LQinv,
Lcolprec=np.diag(var_col_A**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.ones(A.shape[0])*var
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
var = kwargs['var']
mean_A = np.zeros((n,n))
var_col_A = np.ones(n)*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
class SquareMatrixPrecondHelper(PrecondHelper):
def __init__(self, name='A', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.dot(Q, grad[self.name])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.linalg.solve(LQinv.T,
np.random.normal(loc=0, size=LQinv.shape)
)
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
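# sample_prior above draws A ~ MatrixNormal(mean_A, rowcov=Q^{-1},
# colcov=diag(var_col_A)), i.e. vec(A) is Gaussian with covariance
# colcov (x) rowcov. A standalone sketch of such a draw (hypothetical values):
def _example_matrix_normal_draw(n=2, var=1.0):
    mean = np.zeros((n, n))
    rowcov = np.eye(n)                 # Q^{-1} with Q = I
    colcov = np.diag(np.ones(n) * var)
    return scipy.stats.matrix_normal(mean=mean, rowcov=rowcov,
                                     colcov=colcov).rvs()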
# Multiple Square
class SquareMatricesParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, n, n2 = np.shape(kwargs[self.name])
if n != n2:
raise ValueError("{} must be square matrices".format(self.name))
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', True):
A = param.var_dict[self.name]
for k, A_k in enumerate(A):
A_k = varp_stability_projection(A_k,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
A[k] = A_k
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
A = np.reshape(vector[vector_index:vector_index+num_states*n**2],
(num_states, n, n))
var_dict[self.name] = A
return vector_index+num_states*n**2
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {2} of {1} by {1} matrices".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class SquareMatricesPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name)
self._var_col_name = 'var_col_{0}'.format(name)
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, n, n2 = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2, n3 = np.shape(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if n != n2:
raise ValueError("{} must be square".format(self._mean_name))
if (n != n3) or (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
n = prior.dim[self.dim_names[0]]
num_states = prior.dim[self.dim_names[1]]
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(n) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
As[k] = scipy.stats.matrix_normal(
mean=mean_A[k],
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=np.diag(var_col_A[k]),
).rvs()
var_dict[self.name] = np.array(As)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
n = prior.dim[self.dim_names[0]]
num_states = prior.dim[self.dim_names[1]]
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(n) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
S_prevprev = np.diag(var_col_A[k]**-1) + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_A[k] * var_col_A[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
As[k] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
var_dict[self.name] = np.array(As)
return
def logprior(self, prior, logprior, parameters, **kwargs):
n = prior.dim[self.dim_names[0]]
num_states = prior.dim[self.dim_names[1]]
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv_vec = getattr(parameters, self._lt_vec_name)
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in LQinv_vec])
else:
LQinvs = np.array([np.eye(n) for _ in range(num_states)])
for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
logprior += matrix_normal_logpdf(A_k,
mean=mean_A_k,
Lrowprec=LQinv_k,
Lcolprec=np.diag(var_col_A_k**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                for _ in range(prior.dim[self.dim_names[1]])])
grad[self.name] = np.array([
-1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
for k in range(prior.dim[self.dim_names[1]])
])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.array([
np.ones(A.shape[0])*var for _ in range(A.shape[0])
])
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
var = kwargs['var']
mean_A = np.zeros((num_states, n,n))
var_col_A = np.ones((num_states,n))*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
class SquareMatricesPrecondHelper(PrecondHelper):
def __init__(self, name='A', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.array([
np.dot(Q[k], grad[self.name][k])
for k in range(Q.shape[0])
])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.array([
np.linalg.solve(LQinv[k].T,
np.random.normal(loc=0, size=LQinv[k].shape)
)
for k in range(LQinv.shape[0])
])
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Single Rectangular (m by n)
class RectMatrixParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['m','n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
m, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', False):
A = param.var_dict[self.name]
A = varp_stability_projection(A,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
if name_kwargs.get('fixed_eye', False):
k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
A = param.var_dict[self.name]
A[0:k, 0:k] = np.eye(k)
param.var_dict[self.name] = A
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
A = np.reshape(vector[vector_index:vector_index+m*n], (m, n))
var_dict[self.name] = A
return vector_index+m*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {1} by {2} matrix".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
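# Hedged round-trip check for the flatten/unflatten pair defined above; all
# names below are local to this illustration, not library API.
def _rect_vector_roundtrip_demo():
    helper = RectMatrixParamHelper(name='A')
    var_dict = {'A': np.arange(6, dtype=float).reshape(2, 3)}
    vector_list = []
    helper.from_dict_to_vector(vector_list, var_dict)
    vector = np.concatenate(vector_list)
    recovered = {}
    next_index = helper.from_vector_to_dict(recovered, vector, 0, m=2, n=3)
    assert next_index == 6 and np.allclose(recovered['A'], var_dict['A'])
    return recovered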
class RectMatrixPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # m by n ndarray
self._var_col_name = 'var_col_{0}'.format(name) # n ndarray
self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name) # vech of the m by m LQinv
self.dim_names = ['m', 'n'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
m, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
n2 = np.size(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if n != n2:
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
var_dict[self.name] = scipy.stats.matrix_normal(
mean=mean_A,
rowcov=pos_def_mat_inv(Qinv),
colcov=np.diag(var_col_A),
).rvs()
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
S_prevprev = np.diag(var_col_A**-1) + \
sufficient_stat[self.name]['S_prevprev']
S_curprev = mean_A * var_col_A**-1 + \
sufficient_stat[self.name]['S_curprev']
var_dict[self.name] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinv),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
else:
LQinv = np.eye(prior.dim[self.dim_names[0]])
logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
mean=mean_A,
Lrowprec=LQinv,
Lcolprec=np.diag(var_col_A**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.ones(A.shape[1])*var
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
var = kwargs['var']
mean_A = np.zeros((m,n))
var_col_A = np.ones(n)*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
class RectMatrixPrecondHelper(PrecondHelper):
def __init__(self, name='A', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['m', 'n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.dot(Q, grad[self.name])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
m = parameters.dim[self.dim_names[0]]
n = parameters.dim[self.dim_names[1]]
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.linalg.solve(LQinv.T,
np.random.normal(loc=0, size=(m, n))
)
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Multiple Rectangular
class RectMatricesParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['m', 'n', 'num_states'] \
if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, m, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
self.dim_names[2]: num_states,
})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', False):
A = param.var_dict[self.name]
for k, A_k in enumerate(A):
A_k = varp_stability_projection(A_k,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
A[k] = A_k
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
if name_kwargs.get('fixed_eye', False):
k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
A = param.var_dict[self.name]
            for kk in range(param.dim[self.dim_names[2]]):
A[kk, 0:k, 0:k] = np.eye(k)
param.var_dict[self.name] = A
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
num_states = kwargs[self.dim_names[2]]
A = np.reshape(vector[vector_index:vector_index+num_states*m*n],
(num_states, m, n))
var_dict[self.name] = A
return vector_index+num_states*m*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {3} by {1} by {2} matrices".format(
self.name, self.dim_names[0],
self.dim_names[1], self.dim_names[2]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class RectMatricesPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # num_states x m x n
self._var_col_name = 'var_col_{0}'.format(name) # num_states x n
self._var_row_name = var_row_name # num_states x m x m
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['m', 'n', 'num_states'] \
if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, m, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2, n2 = np.shape(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if (n != n2) or (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
self.dim_names[2]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(m) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
As[k] = scipy.stats.matrix_normal(
mean=mean_A[k],
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=np.diag(var_col_A[k]),
).rvs()
var_dict[self.name] = np.array(As)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(m) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
S_prevprev = np.diag(var_col_A[k]**-1) + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_A[k] * var_col_A[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
As[k] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
var_dict[self.name] = np.array(As)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
else:
LQinvs = np.array([np.eye(m) for _ in range(num_states)])
for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
logprior += matrix_normal_logpdf(A_k,
mean=mean_A_k,
Lrowprec=LQinv_k,
Lcolprec=np.diag(var_col_A_k**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
            Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                              for _ in range(prior.dim[self.dim_names[2]])])
grad[self.name] = np.array([
-1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
for k in range(prior.dim[self.dim_names[2]])
])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.array([
np.ones(A.shape[2])*var for _ in range(A.shape[0])
])
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
num_states = kwargs[self.dim_names[2]]
var = kwargs['var']
mean_A = np.zeros((num_states,m,n))
var_col_A = np.ones((num_states,n))*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
class RectMatricesPrecondHelper(PrecondHelper):
def __init__(self, name='A', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['m', 'n', 'num_states'] \
if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.array([
np.dot(Q[k], grad[self.name][k])
for k in range(Q.shape[0])
])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
m = parameters.dim[self.dim_names[0]]
n = parameters.dim[self.dim_names[1]]
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.array([
np.linalg.solve(LQinv[k].T,
np.random.normal(loc=0, size=(m,n))
)
for k in range(LQinv.shape[0])
])
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
if __name__ == "__main__":
# Demo of Parameters
class SquareParameters(BaseParameters):
""" Square Parameters """
_param_helper_list = [
SquareMatrixParamHelper(name='A', dim_names=['n'])
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "SquareParameters:"
my_str += "\nA:\n" + str(self.A)
return my_str
class SquareMatrixPrior(BasePrior):
""" Square Prior """
_Parameters = SquareParameters
_prior_helper_list = [
SquareMatrixPriorHelper(name='A', dim_names=['n'], var_row_name=None)
]
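    # Hedged usage sketch: assumes BaseParameters accepts variable values as
    # keyword arguments routed through the ParamHelper.set_var hooks above;
    # adjust to the actual constructor if it differs.
    demo_param = SquareParameters(A=np.eye(2))
    print(demo_param)
    print("n =", demo_param.n)  # dimension property generated by the helper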
| 57,109 | 39.417551 | 81 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/variables/__init__.py | from .matrices import (
VectorParamHelper,
VectorPriorHelper,
VectorPrecondHelper,
VectorsParamHelper,
VectorsPriorHelper,
VectorsPrecondHelper,
SquareMatrixParamHelper,
SquareMatrixPriorHelper,
SquareMatrixPrecondHelper,
SquareMatricesParamHelper,
SquareMatricesPriorHelper,
SquareMatricesPrecondHelper,
RectMatrixParamHelper,
RectMatrixPriorHelper,
RectMatrixPrecondHelper,
RectMatricesParamHelper,
RectMatricesPriorHelper,
RectMatricesPrecondHelper,
)
from .covariance import (
CovarianceParamHelper,
CovariancePriorHelper,
CovariancePrecondHelper,
CovariancesParamHelper,
CovariancesPriorHelper,
CovariancesPrecondHelper,
)
from .probweight import (
TransitionMatrixParamHelper,
TransitionMatrixPriorHelper,
TransitionMatrixPrecondHelper,
BernoulliParamHelper,
BernoulliPriorHelper,
BernoulliPrecondHelper,
)
| 1,077 | 28.135135 | 38 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/variables/probweight.py | import numpy as np
import scipy.stats
from scipy.special import logit, expit, logsumexp
from ..base_parameters import (
ParamHelper, PriorHelper, PrecondHelper,
get_value_func, get_hyperparam_func, get_dim_func,
set_value_func, set_hyperparam_func,
)
import logging
logger = logging.getLogger(name=__name__)
## Implementations of Bernoulli + Transition Matrix Parameters
# Bernoulli Var
class BernoulliParamHelper(ParamHelper):
def __init__(self, name='pi'):
self.name = name
self._logit_name = 'logit_{}'.format(name)
self.dim_names = []
return
def set_var(self, param, **kwargs):
if self._logit_name in kwargs:
if np.size(kwargs[self._logit_name]) != 1:
raise ValueError("{} must be 1D scalar".format(self._logit_name))
param.var_dict[self._logit_name] = np.atleast_1d(
kwargs[self._logit_name]).astype(float)
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self._logit_name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self._logit_name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
logit_pi = np.reshape(vector[vector_index:vector_index+1], (1))
var_dict[self._logit_name] = logit_pi
return vector_index+1
def get_properties(self):
properties = {}
properties[self._logit_name] = property(
fget=get_value_func(self._logit_name),
fset=set_value_func(self._logit_name),
doc="{0} is a scalar log-odds".format(
self._logit_name),
)
properties[self.name] = property(
fget=get_pi_func(self.name),
doc="{0} is a probability in [0,1]".format(self.name),
)
return properties
def get_pi_func(name):
def fget(self):
logit_pi = getattr(self, "logit_{0}".format(name))
pi = expit(logit_pi)
return pi
return fget
class BernoulliPriorHelper(PriorHelper):
def __init__(self, name='pi', dim_names=None):
self.name = name
self._logit_name = 'logit_{0}'.format(name)
self._alpha_name = 'alpha_{0}'.format(name)
self._beta_name = 'beta_{0}'.format(name)
self.dim_names = [] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._alpha_name in kwargs:
alpha = kwargs[self._alpha_name]
if np.size(alpha) != 1 or alpha < 0:
raise ValueError("{} must be a nonnegative scalar".format(
self._alpha_name))
prior.hyperparams[self._alpha_name] = alpha
else:
raise ValueError("{} must be provided".format(self._alpha_name))
if self._beta_name in kwargs:
beta = kwargs[self._beta_name]
if np.size(beta) != 1 or beta < 0:
raise ValueError("{} must be a nonnegative scalar".format(
self._beta_name))
prior.hyperparams[self._beta_name] = beta
else:
raise ValueError("{} must be provided".format(self._beta_name))
return
def sample_prior(self, prior, var_dict, **kwargs):
alpha = prior.hyperparams[self._alpha_name]
beta = prior.hyperparams[self._beta_name]
pi = scipy.stats.beta(a=alpha, b=beta).rvs()
var_dict[self._logit_name] = np.array(logit(pi))
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
raise NotImplementedError()
return
def logprior(self, prior, logprior, parameters, **kwargs):
alpha = prior.hyperparams[self._alpha_name]
beta = prior.hyperparams[self._beta_name]
logprior += scipy.stats.beta.logpdf(
getattr(parameters, self.name),
a=alpha,
b=beta,
)
        # np.asscalar was removed from NumPy; .item() is the supported replacement
        return np.asarray(logprior).item()
def grad_logprior(self, prior, grad, parameters, **kwargs):
alpha = prior.hyperparams[self._alpha_name]
beta = prior.hyperparams[self._beta_name]
pi = getattr(parameters, self.name)
grad[self._logit_name] = np.array((alpha-1)*(1-pi) - (beta-1)*pi)
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
pi = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
            alpha, beta = pi, 1-pi
else:
alpha, beta = 0.5, 0.5
prior_kwargs[self._alpha_name] = alpha
prior_kwargs[self._beta_name] = beta
return
def get_default_kwargs(self, default_kwargs, **kwargs):
default_kwargs[self._alpha_name] = 0.5
default_kwargs[self._beta_name] = 0.5
return
class BernoulliPrecondHelper(PrecondHelper):
def __init__(self, name='pi', dim_names=None):
self.name = name
self._logit_name = 'logit_{}'.format(self.name)
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
precond_grad[self._logit_name] = grad[self._logit_name]
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
noise[self._logit_name] = np.random.normal(loc=0, size=(1))
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self._logit_name] = np.zeros((1), dtype=float)
return
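# Hedged illustration of the scalar log-odds parameterization handled above;
# all names are local to this example.
def _bernoulli_logit_demo():
    logit_pi = np.array([0.0])
    pi = expit(logit_pi)  # log-odds 0 corresponds to probability 0.5
    assert np.allclose(logit(pi), logit_pi)
    return pi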
# Transition Matrix
class TransitionMatrixParamHelper(ParamHelper):
def __init__(self, name='pi', dim_names=None):
self.name = name
self._logit_name = "logit_{0}".format(name)
self._expanded_name = "expanded_{0}".format(name)
self.dim_names = ['num_states', '{0}_type'.format(name)]
if dim_names is not None:
self.dim_names = dim_names
return
def set_var(self, param, **kwargs):
if self._logit_name in kwargs:
num_states, num_states2 = np.shape(kwargs[self._logit_name])
if num_states != num_states2:
raise ValueError("{} must be square matrix".format(self._logit_name))
param.var_dict[self._logit_name] = np.array(kwargs[self._logit_name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: num_states,
self.dim_names[1]: 'logit',
})
elif self._expanded_name in kwargs:
num_states, num_states2 = np.shape(kwargs[self._expanded_name])
if num_states != num_states2:
raise ValueError("{} must be square matrix".format(self._expanded_name))
param.var_dict[self._expanded_name] = np.array(kwargs[self._expanded_name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: num_states,
self.dim_names[1]: 'expanded',
})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
pi_type = getattr(param, self.dim_names[1])
if pi_type == 'logit' and name_kwargs.get('center', False):
# Center logit_pi to be stable
logit_pi = param.var_dict[self._logit_name]
            logit_pi -= np.outer(np.mean(logit_pi, axis=1),
                                 np.ones(logit_pi.shape[1]))
if pi_type == 'expanded':
param.var_dict[self._expanded_name] = \
np.abs(param.var_dict[self._expanded_name])
if name_kwargs.get('center', False):
param.var_dict[self._expanded_name] /= \
np.sum(param.var_dict[self._expanded_name], axis=1)
if name_kwargs.get('fixed') is not None:
if pi_type == 'logit':
param.var_dict[self._logit_name] = name_kwargs['fixed'].copy()
elif pi_type == 'expanded':
param.var_dict[self._expanded_name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
if self._logit_name in var_dict:
vector_list.append(var_dict[self._logit_name].flatten())
elif self._expanded_name in var_dict:
vector_list.append(var_dict[self._expanded_name].flatten())
else:
raise RuntimeError("Missin either {0} or {1} in var_dict".format(
self._logit_name, self._expanded_name))
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
num_states = kwargs[self.dim_names[0]]
pi_type = kwargs[self.dim_names[1]]
pi_mat = np.reshape(
vector[vector_index:vector_index+num_states**2],
(num_states, num_states))
if pi_type == 'logit':
var_dict[self._logit_name] = pi_mat
elif pi_type == 'expanded':
var_dict[self._expanded_name] = pi_mat
else:
raise ValueError("Unrecognized {0} {1}".format(
self.dim_names[1], pi_type))
return vector_index+num_states**2
def get_properties(self):
properties_kwargs = dict(
pi_type_name=self.dim_names[1],
logit_pi_name=self._logit_name,
expanded_pi_name=self._expanded_name,
)
# Parameter Values
properties = {}
properties[self.name] = property(
fget=get_pi_func2(**properties_kwargs),
doc="{0} is a {1} by {1} stochastic matrix".format(
self.name, self.dim_names[0]),
)
properties[self._logit_name] = property(
fget=get_logit_pi_func(**properties_kwargs),
fset=set_logit_pi_func(**properties_kwargs),
doc="{0} is the row-wise logit of {1}".format(
self._logit_name, self.name),
)
properties[self._expanded_name] = property(
fget=get_expanded_pi_func(**properties_kwargs),
fset=set_expanded_pi_func(**properties_kwargs),
doc="{0} is the row-wise expanded mean of {1}".format(
self._logit_name, self.name),
)
# Dims
properties[self.dim_names[0]] = property(
fget=get_dim_func(self.dim_names[0]),
)
properties[self.dim_names[1]] = property(
fget=get_dim_func(self.dim_names[1]),
fset=set_pi_type(**properties_kwargs),
)
return properties
def get_pi_func2(pi_type_name, logit_pi_name, expanded_pi_name):
def fget(self):
pi_type = getattr(self, pi_type_name)
if pi_type == 'logit':
logit_pi = self.var_dict[logit_pi_name]
pi = np.exp(logit_pi - np.outer(
logsumexp(logit_pi, axis=1),
np.ones(logit_pi.shape[1])
))
elif pi_type == 'expanded':
expanded_pi = self.var_dict[expanded_pi_name]
pi = np.abs(expanded_pi) / np.outer(
np.sum(np.abs(expanded_pi), axis=1),
np.ones(expanded_pi.shape[1])
)
else:
raise ValueError("Unrecognized {0} {1}".format(
pi_type_name, pi_type))
return pi
return fget
def get_logit_pi_func(pi_type_name, logit_pi_name, expanded_pi_name):
def fget(self):
pi_type = getattr(self, pi_type_name)
if pi_type == 'logit':
logit_pi = self.var_dict[logit_pi_name]
elif pi_type == 'expanded':
logit_pi = np.log(np.abs(self.var_dict[expanded_pi_name]) + 1e-99)
logit_pi -= np.outer(
np.mean(logit_pi, axis=1),
np.ones(logit_pi.shape[1])
)
else:
raise ValueError("Unrecognized {0} {1}".format(
pi_type_name, pi_type))
return logit_pi
return fget
def set_logit_pi_func(pi_type_name, logit_pi_name, expanded_pi_name):
def fset(self, value):
pi_type = getattr(self, pi_type_name)
if pi_type == 'logit':
self.var_dict[logit_pi_name] = value
else:
raise ValueError("{0} != 'logit'".format(pi_type_name))
return
return fset
def get_expanded_pi_func(pi_type_name, logit_pi_name, expanded_pi_name):
def fget(self):
pi_type = getattr(self, pi_type_name)
if pi_type == 'logit':
logit_pi = self.var_dict[logit_pi_name]
expanded_pi = np.exp(logit_pi - np.outer(
logsumexp(logit_pi, axis=1),
np.ones(logit_pi.shape[1])
))
elif pi_type == 'expanded':
expanded_pi = self.var_dict[expanded_pi_name]
else:
raise ValueError("Unrecognized {0} {1}".format(
pi_type_name, pi_type))
return expanded_pi
return fget
def set_expanded_pi_func(pi_type_name, logit_pi_name, expanded_pi_name):
def fset(self, value):
pi_type = getattr(self, pi_type_name)
if pi_type == 'expanded':
self.var_dict[expanded_pi_name] = value
else:
raise ValueError("{0} != 'expanded'".format(pi_type_name))
return
return fset
def set_pi_type(pi_type_name, logit_pi_name, expanded_pi_name):
def fset(self, value):
pi_type = getattr(self, pi_type_name)
if pi_type == value:
return
else:
if value == 'logit':
logit_pi = getattr(self, logit_pi_name)
self.var_dict[logit_pi_name] = logit_pi
self.var_dict.pop(expanded_pi_name)
self.dim[pi_type_name] = value
elif value == 'expanded':
expanded_pi = getattr(self, expanded_pi_name)
self.var_dict[expanded_pi_name] = expanded_pi
self.var_dict.pop(logit_pi_name)
self.dim[pi_type_name] = value
else:
raise ValueError("Unrecognized {0} {1}".format(
pi_type_name, value))
return
return fset
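# Hedged illustration: both parameterizations handled above recover a
# row-stochastic matrix -- 'logit' via a row-wise softmax, 'expanded' via
# row-wise normalization of absolute values. Local names only.
def _pi_parameterization_demo():
    logit_pi = np.log(np.array([[0.9, 0.1], [0.3, 0.7]]))
    pi_from_logit = np.exp(
        logit_pi - logsumexp(logit_pi, axis=1, keepdims=True))
    expanded_pi = np.array([[1.8, 0.2], [0.6, 1.4]])
    pi_from_expanded = np.abs(expanded_pi) / np.sum(
        np.abs(expanded_pi), axis=1, keepdims=True)
    assert np.allclose(pi_from_logit.sum(axis=1), 1.0)
    assert np.allclose(pi_from_expanded.sum(axis=1), 1.0)
    return pi_from_logit, pi_from_expanded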
class TransitionMatrixPriorHelper(PriorHelper):
def __init__(self, name='pi', dim_names=None, var_row_name=None):
self.name = name
self._logit_name = "logit_{0}".format(name)
self._expanded_name = "expanded_{0}".format(name)
self._type_name = "{0}_type".format(name)
self._alpha = 'alpha_{0}'.format(name)
self.dim_names = ['num_states'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._alpha in kwargs:
num_states, num_states2 = np.shape(kwargs[self._alpha])
else:
raise ValueError("{} must be provided".format(self._alpha))
if num_states != num_states2:
raise ValueError("{} must be square".format(self._alpha))
prior._set_check_dim(**{self.dim_names[0]: num_states})
prior.hyperparams[self._alpha] = kwargs[self._alpha]
return
def sample_prior(self, prior, var_dict, **kwargs):
pi_type = kwargs.get(self._type_name, 'logit')
alpha = prior.hyperparams[self._alpha]
pi = np.array([np.random.dirichlet(alpha_k) for alpha_k in alpha])
if pi_type == 'logit':
var_dict[self._logit_name] = np.log(pi+1e-99)
elif pi_type == 'expanded':
var_dict[self._expanded_name] = pi
else:
raise ValueError("Unrecognized {0} {1}".format(
self._type_name, pi_type))
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
pi_type = kwargs.get(self._type_name, 'logit')
alpha = prior.hyperparams[self._alpha] + \
sufficient_stat[self.name]['alpha']
pi = np.array([np.random.dirichlet(alpha_k) for alpha_k in alpha])
if pi_type == 'logit':
var_dict[self._logit_name] = np.log(pi+1e-99)
elif pi_type == 'expanded':
var_dict[self._expanded_name] = pi
else:
raise ValueError("Unrecognized {0} {1}".format(
self._type_name, pi_type))
return
def logprior(self, prior, logprior, parameters, **kwargs):
alpha = prior.hyperparams[self._alpha]
pi = getattr(parameters, self.name)
for pi_k, alpha_k in zip(pi, alpha):
logprior += scipy.stats.dirichlet.logpdf(pi_k+1e-16, alpha=alpha_k)
return logprior
def grad_logprior(self, prior, grad, parameters, use_scir=False, **kwargs):
pi_type = getattr(parameters, self._type_name)
alpha = prior.hyperparams[self._alpha]
if use_scir:
if pi_type == "logit":
grad[self._logit_name] = alpha
elif pi_type == "expanded":
grad[self._expanded_name] = alpha
else:
if pi_type == "logit":
grad[self._logit_name] = np.array([
-pi_k*np.sum(alpha_k-1.0) + (alpha_k-1.0)
for pi_k, alpha_k in zip(getattr(parameters, self.name),
alpha)
])
elif pi_type == "expanded":
grad[self._expanded_name] = np.array([
(-exp_pi_k*np.sum(alpha_k-1.0)/np.sum(exp_pi_k) + \
(alpha_k-1.0)) * exp_pi_k
for exp_pi_k, alpha_k in zip(
getattr(parameters, self._expanded_name), alpha)
])
else:
RuntimeError("Unrecognized pi_type")
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
num_states = getattr(parameters, self.dim_names[0])
if kwargs.get('from_mean', False):
alpha = getattr(parameters, self.name)/(var/num_states)
else:
alpha = np.ones((num_states, num_states))/var
prior_kwargs[self._alpha] = alpha
return
def get_default_kwargs(self, default_kwargs, **kwargs):
num_states = kwargs[self.dim_names[0]]
var = kwargs['var']
alpha = np.ones((num_states, num_states))/var
default_kwargs[self._alpha] = alpha
return
class TransitionMatrixPrecondHelper(PrecondHelper):
def __init__(self, name='pi', dim_names=None):
self.name = name
self._logit_name = "logit_{0}".format(name)
self._expanded_name = "expanded_{0}".format(name)
self._type_name = "{0}_type".format(name)
self.dim_names = ['num_states'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
pi_type = getattr(parameters, self._type_name)
if pi_type == 'logit':
precond_grad[self._logit_name] = grad[self._logit_name]
elif pi_type == 'expanded':
if kwargs.get('use_scir', False):
# Don't precondition if using SCIR
precond_grad[self._expanded_name] = grad[self._expanded_name]
else:
precond_grad[self._expanded_name] = (grad[self._expanded_name] *
(1e-99 + np.abs(getattr(parameters, self._expanded_name))))
else:
raise RuntimeError("Unrecognized {0} {1}".format(
self._type_name, pi_type))
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
pi_type = getattr(parameters, self._type_name)
num_states = getattr(parameters, self.dim_names[0])
if pi_type == 'logit':
noise[self._logit_name] = np.random.normal(loc=0,
size=(num_states, num_states))
elif pi_type == 'expanded':
noise[self._expanded_name] = (
(1e-99 + np.abs(getattr(parameters, self._expanded_name)))**0.5 *
np.random.normal(loc=0, size=(num_states, num_states))
)
else:
raise RuntimeError("Unrecognized {0} {1}".format(
self._type_name, pi_type))
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
pi_type = getattr(parameters, self._type_name)
num_states = getattr(parameters, self.dim_names[0])
if pi_type == 'logit':
correction[self._logit_name] = \
np.zeros((num_states, num_states), dtype=float)
elif pi_type == 'expanded':
correction[self._expanded_name] = \
np.ones((num_states, num_states), dtype=float)
else:
raise RuntimeError("Unrecognized {0} {1}".format(
self._type_name, pi_type))
return
| 21,332 | 39.099624 | 101 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/variables/garch_var.py | import numpy as np
import scipy.stats
from scipy.special import expit, logit
from ..base_parameters import (
ParamHelper, PriorHelper, PrecondHelper,
get_value_func, get_hyperparam_func, get_dim_func,
set_value_func, set_hyperparam_func,
)
from .._utils import (
normal_logpdf,
matrix_normal_logpdf,
pos_def_mat_inv,
varp_stability_projection,
)
import logging
logger = logging.getLogger(name=__name__)
## Implementations of GARCH
# Single Square
class GARCHParamHelper(ParamHelper):
def __init__(self):
self.names = ['log_mu', 'logit_phi', 'logit_lambduh']
self.dim_names = []
return
def set_var(self, param, **kwargs):
for name in self.names:
if name in kwargs:
param.var_dict[name] = np.atleast_1d(kwargs[name]).astype(float)
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
for name in self.names:
name_kwargs = kwargs.get(name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
for name in self.names:
vector_list.append(var_dict[name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
for name in self.names:
var = np.reshape(vector[vector_index:vector_index+1], (1))
var_dict[name] = var
vector_index += 1
return vector_index
def get_properties(self):
properties = {}
for name in self.names:
properties[name] = property(
fget=get_value_func(name),
fset=set_value_func(name),
)
properties['mu'] = property(fget=fget_mu)
properties['phi'] = property(fget=fget_phi)
properties['lambduh'] = property(fget=fget_lambduh)
properties['alpha'] = property(fget=fget_alpha)
properties['beta'] = property(fget=fget_beta)
properties['gamma'] = property(fget=fget_gamma)
return properties
def fget_mu(self):
mu = np.exp(self.var_dict['log_mu'])
return mu
def fget_phi(self):
phi = expit(self.var_dict['logit_phi'])
return phi
def fget_lambduh(self):
lambduh = expit(self.var_dict['logit_lambduh'])
return lambduh
def fget_alpha(self):
alpha = self.mu * (1-self.phi)
return alpha
def fget_beta(self):
beta = self.phi * self.lambduh
return beta
def fget_gamma(self):
gamma = self.phi * (1-self.lambduh)
return gamma
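# Hedged sketch: under the usual GARCH(1,1) recursion used with this
# parameterization, sigma2_t = alpha + beta*sigma2_{t-1} + gamma*y_{t-1}**2.
# Since beta + gamma = phi in (0, 1), the volatility process is stationary
# with stationary mean alpha/(1-phi) = mu. The helper below is illustrative
# only and not used elsewhere in this module.
def _garch_recursion_demo(alpha, beta, gamma, y, sigma2_init):
    sigma2 = sigma2_init
    for y_prev in y:
        sigma2 = alpha + beta * sigma2 + gamma * y_prev**2
    return sigma2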
class GARCHPriorHelper(PriorHelper):
def __init__(self):
self.names = ['log_mu', 'logit_phi', 'logit_lambduh']
self.hyperparam_names = [
'scale_mu', 'shape_mu',
'alpha_phi', 'beta_phi',
'alpha_lambduh', 'beta_lambduh',
]
return
def set_hyperparams(self, prior, **kwargs):
for name in self.hyperparam_names:
if name in kwargs:
prior.hyperparams[name] = kwargs[name]
else:
raise ValueError("{} must be provided".format(name))
return
def sample_prior(self, prior, var_dict, **kwargs):
# mu
mu = scipy.stats.invgamma(
a=prior.hyperparams['shape_mu'],
scale=prior.hyperparams['scale_mu']
).rvs()
var_dict['log_mu'] = np.log(mu)
# phi
phi = scipy.stats.beta(
a=prior.hyperparams['alpha_phi'],
b=prior.hyperparams['beta_phi'],
).rvs()
var_dict['logit_phi'] = logit(phi)
# lambduh
lambduh = scipy.stats.beta(
a=prior.hyperparams['alpha_lambduh'],
b=prior.hyperparams['beta_lambduh'],
).rvs()
var_dict['logit_lambduh'] = logit(lambduh)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
raise NotImplementedError("GARCH is not conjugate")
def logprior(self, prior, logprior, parameters, **kwargs):
logprior += scipy.stats.invgamma(
a=prior.hyperparams['shape_mu'],
scale=prior.hyperparams['scale_mu']
).logpdf(parameters.mu)
logprior += scipy.stats.beta(
a=prior.hyperparams['alpha_phi'],
b=prior.hyperparams['beta_phi']
).logpdf((1+parameters.phi)/2.0)
logprior += scipy.stats.beta(
a=prior.hyperparams['alpha_lambduh'],
b=prior.hyperparams['beta_lambduh']
).logpdf((1+parameters.lambduh)/2.0)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
grad['log_mu'] = - prior.hyperparams['shape_mu'] - 1 + \
prior.hyperparams['scale_mu'] / parameters.mu
grad['logit_phi'] = (
(prior.hyperparams['alpha_phi'] - 1) / (1 + parameters.phi) -
(prior.hyperparams['beta_phi'] - 1) / (1 - parameters.phi)
) * parameters.phi * (1-parameters.phi)
grad['logit_lambduh'] = (
(prior.hyperparams['alpha_lambduh'] - 1) / (1 + parameters.lambduh) -
(prior.hyperparams['beta_lambduh'] - 1) / (1 - parameters.lambduh)
) * parameters.lambduh * (1-parameters.lambduh)
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
if var > 1:
var = 1
prior_kwargs['scale_mu'] = var + 2
prior_kwargs['shape_mu'] = prior_kwargs['scale_mu'] + 1
prior_kwargs['alpha_phi'] = 1 + 19*var**-1
prior_kwargs['beta_phi'] = prior_kwargs['alpha_phi'] / 9
prior_kwargs['alpha_lambduh'] = 1 + 19*var**-1
prior_kwargs['beta_lambduh'] = prior_kwargs['alpha_lambduh'] / 9
return
def get_default_kwargs(self, default_kwargs, **kwargs):
var = kwargs['var']
if var > 1:
var = 1
default_kwargs['scale_mu'] = var + 2
default_kwargs['shape_mu'] = default_kwargs['scale_mu'] + 1
default_kwargs['alpha_phi'] = 1 + 19*var**-1
default_kwargs['beta_phi'] = default_kwargs['alpha_phi'] / 9
default_kwargs['alpha_lambduh'] = 1 + 19*var**-1
default_kwargs['beta_lambduh'] = default_kwargs['alpha_lambduh'] / 9
return
| 6,592 | 33.518325 | 81 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/particle_filters/custom_kernels.py | """
Advanced Kernels for particle filters
"""
from .kernels import *
from scipy.optimize import root_scalar
from scipy.special import logsumexp, roots_hermitenorm
class SVMLaplaceKernel(LatentGaussianKernel):
def __init__(self, **kwargs):
self.approx_param=dict(mean=0, var=1)
super().__init__(**kwargs)
return
def rv(self, x_t, **kwargs):
""" Laplace Kernel for SVM
Sample x_{t+1} ~ q(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
x_next_mean = np.mean(x_t) * self.parameters.A
x_next_var = np.var(x_t) * self.parameters.A**2 + self.parameters.Q
scaled_y2 = (self.parameters.LRinv*self.y_next)**2
taylor_deriv = lambda x: 0.5*scaled_y2*np.exp(-x) - 0.5 - (x-x_next_mean)/x_next_var
laplace_mean = root_scalar(taylor_deriv,
bracket=[-100*np.sqrt(x_next_var), 100*np.sqrt(x_next_var)]
).root
        # Laplace variance = inverse negative curvature of the log target at the mode
        laplace_var = 1.0/(0.5*scaled_y2*np.exp(-laplace_mean) + x_next_var**-1)
self.approx_param['mean'] = laplace_mean
self.approx_param['var'] = laplace_var
x_next = np.sqrt(laplace_var) * np.random.normal(
size=x_t.shape) + laplace_mean
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Laplace Kernel for SVM
weight_t = p(y_{t+1}, x_{t+1} | x_t, parameters)/q(x_{t+1}, x_t)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# n > 1
raise NotImplementedError()
else:
# n = 1, x is scalar
scaled_y2 = (self.parameters.LRinv*self.y_next)**2
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*scaled_y2*np.exp(-x_next) + \
np.log(self.parameters.LRinv) + \
-0.5*x_next
diff = x_next - self.parameters.A*x_t
loglike = -0.5*(diff**2)*self.parameters.Qinv + \
-0.5*np.log(2.0*np.pi) + np.log(self.parameters.LQinv)
            kernel_diff = x_next - self.approx_param['mean']
            kernel_like = -0.5*(kernel_diff**2)/self.approx_param['var'] + \
                -0.5*np.log(2.0*np.pi) - 0.5*np.log(self.approx_param['var'])
log_weights = np.reshape(log_weights+loglike-kernel_like, (N))
return log_weights
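# Hedged sketch of the Laplace step used by SVMLaplaceKernel.rv above: locate
# the mode of the log target by root-finding on its first derivative, then set
# the proposal variance to the inverse negative curvature at that mode.
# 'log_target_deriv' and 'neg_curvature' are illustrative callables, not
# library API.
def _laplace_fit_demo(log_target_deriv, neg_curvature, bracket):
    mode = root_scalar(log_target_deriv, bracket=bracket).root
    var = 1.0 / neg_curvature(mode)
    return mode, var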
class SVMEPKernel(LatentGaussianKernel):
def __init__(self, **kwargs):
self.approx_param=dict(mean=0, var=1)
super().__init__(**kwargs)
return
def rv(self, x_t, **kwargs):
""" EP Approx Kernel for SVM
Sample x_{t+1} ~ q(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
x_next_mean = np.mean(x_t) * self.parameters.A
x_next_var = np.var(x_t) * self.parameters.A**2 + self.parameters.Q
scaled_y2 = (self.parameters.LRinv*self.y_next)**2
# Gauss Quadrature for EP
x_i = roots_hermitenorm(100)[0] * np.sqrt(x_next_var) + x_next_mean
        log_w_i = -0.5*(x_i - x_next_mean)**2/x_next_var + \
-0.5*np.log(2*np.pi*x_next_var) + \
-0.5*scaled_y2*np.exp(-x_i) + \
-0.5*x_i -0.5*np.log(2*np.pi)
w_i = np.exp(log_w_i - logsumexp(log_w_i))
approx_mean = np.sum(x_i*w_i)
approx_var = np.sum(x_i**2*w_i) - approx_mean**2
self.approx_param['mean'] = approx_mean
self.approx_param['var'] = approx_var
x_next = np.sqrt(approx_var) * np.random.normal(
size=x_t.shape) + approx_mean
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for EP Approx Kernel for SVM
weight_t = p(y_{t+1}, x_{t+1} | x_t, parameters)/q(x_{t+1}, x_t)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# n > 1
raise NotImplementedError()
else:
# n = 1, x is scalar
scaled_y2 = (self.parameters.LRinv*self.y_next)**2
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*scaled_y2*np.exp(-x_next) + \
np.log(self.parameters.LRinv) + \
-0.5*x_next
diff = x_next - self.parameters.A*x_t
loglike = -0.5*(diff**2)*self.parameters.Qinv + \
-0.5*np.log(2.0*np.pi) + np.log(self.parameters.LQinv)
            kernel_diff = x_next - self.approx_param['mean']
            kernel_like = -0.5*(kernel_diff**2)/self.approx_param['var'] + \
                -0.5*np.log(2.0*np.pi) - 0.5*np.log(self.approx_param['var'])
log_weights = np.reshape(log_weights+loglike-kernel_like, (N))
return log_weights
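# Hedged sketch of the self-normalized quadrature used by the EP kernels
# above: evaluate the tilted log density at (probabilists') Hermite nodes
# rescaled to the Gaussian prior, normalize, and match the first two moments.
# 'log_lik' is an illustrative callable over node locations, not library API.
def _tilted_moments_demo(prior_mean, prior_var, log_lik, n_nodes=100):
    nodes = roots_hermitenorm(n_nodes)[0]
    x = nodes * np.sqrt(prior_var) + prior_mean
    log_w = -0.5 * (x - prior_mean)**2 / prior_var + log_lik(x)
    w = np.exp(log_w - logsumexp(log_w))
    mean = np.sum(x * w)
    var = np.sum(x**2 * w) - mean**2
    return mean, var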
class SVJMEPKernel(SVJMPriorKernel):
def _calc_ep_fit(self, x_t):
# Gauss Quadrature for EP
pJ = self.parameters.pJ
x_next_mean = x_t.T * self.parameters.phi
x_next_var = self.parameters.sigma2
x_nextJ_var = x_next_var + self.parameters.sigmaJ2
scaled_y2 = (self.parameters.Ltau2inv*self.y_next)**2
x_i = roots_hermitenorm(100)[0]
x_1 = x_i[:, np.newaxis] * np.sqrt(x_nextJ_var) + x_next_mean
x_2 = x_i[:, np.newaxis] * np.sqrt(x_next_var) + x_next_mean
log_w_1 = -0.5*(x_1 - x_next_mean)**2/x_nextJ_var + \
-0.5*np.log(2*np.pi*x_nextJ_var)
log_perturb_1 =-0.5*scaled_y2*np.exp(-x_1) + \
-0.5*x_1 -0.5*np.log(2*np.pi)
w_1 = np.exp(log_perturb_1 + log_w_1 - logsumexp(log_w_1, axis=0))
x_z1 = np.sum(w_1, axis=0)
x_bar1 = np.sum(x_1*w_1, axis=0)/x_z1
x_var1 = np.sum(x_1**2*w_1, axis=0)/x_z1 - x_bar1**2
log_w_2 = -0.5*(x_2 - x_next_mean)**2/x_next_var + \
-0.5*np.log(2*np.pi*x_next_var)
log_perturb_2 =-0.5*scaled_y2*np.exp(-x_2) + \
-0.5*x_2 -0.5*np.log(2*np.pi)
w_2 = np.exp(log_perturb_2 + log_w_2 - logsumexp(log_w_2, axis=0))
x_z2 = np.sum(w_2, axis=0)
x_bar2 = np.sum(x_2*w_2, axis=0)/x_z2
x_var2 = np.sum(x_2**2*w_2, axis=0)/x_z2 - x_bar2**2
x_pJ = pJ*x_z1/(pJ*x_z1 + (1-pJ)*x_z2)
return dict(xJ_bar = x_bar1, xJ_var=x_var1,
x_bar=x_bar2, x_var=x_var2, x_pJ=x_pJ)
def rv(self, x_t, **kwargs):
""" EP Mixture Approx Kernel for SVM
Sample x_{t+1} ~ q(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
ep_fit = self._calc_ep_fit(x_t)
jump_ind = np.random.rand(x_t.size) < ep_fit['x_pJ']
x_next_sd = (jump_ind*np.sqrt(ep_fit['xJ_var']) + \
(1-jump_ind)*np.sqrt(ep_fit['x_var']))
x_next_mean = (jump_ind*ep_fit['xJ_bar'] + (1-jump_ind)*ep_fit['x_bar'])
x_next = np.random.normal(size=x_t.shape) * x_next_sd[:,np.newaxis] + x_next_mean[:,np.newaxis]
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for EP Approx Kernel for SVM
weight_t = p(y_{t+1}, x_{t+1} | x_t, parameters)/q(x_{t+1}, x_t)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
# log P(y_t+1 | x_t+1)
scaled_y2 = (self.parameters.Ltau2inv*self.y_next)**2
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*scaled_y2*np.exp(-x_next) + \
np.log(self.parameters.Ltau2inv) + \
-0.5*x_next
# log P(x_t+1 | x_t)
x_diff = x_next - self.parameters.phi*x_t
sigma2_nojump = self.parameters.sigma2
sigma2_jump = self.parameters.sigma2 + self.parameters.sigmaJ2
pJ = self.parameters.pJ
loglike_nojump = -0.5*(x_diff**2)/sigma2_nojump+\
-0.5*np.log(2.0*np.pi) - 0.5*np.log(sigma2_nojump)
loglike_jump = -0.5*(x_diff**2)/sigma2_jump+\
-0.5*np.log(2.0*np.pi) - 0.5*np.log(sigma2_jump)
loglike_max = np.max(np.array([loglike_jump, loglike_nojump]), axis=0)
loglike = np.log(
pJ*np.exp(loglike_jump-loglike_max) +
(1-pJ)*np.exp(loglike_nojump-loglike_max)
) + loglike_max
# log Q(x_t+1 | x_t)
ep_fit = self._calc_ep_fit(x_t)
logker_nojump = \
-0.5*((x_next[:,0]-ep_fit['x_bar'])**2)/ep_fit['x_var'] + \
-0.5*np.log(2.0*np.pi) - 0.5*np.log(ep_fit['x_var'])
logker_jump = \
-0.5*((x_next[:,0]-ep_fit['xJ_bar'])**2)/ep_fit['xJ_var'] + \
-0.5*np.log(2.0*np.pi) - 0.5*np.log(ep_fit['xJ_var'])
logker_max = np.max(np.array([logker_jump, logker_nojump]), axis=0)
logker = np.log(
ep_fit['x_pJ']*np.exp(logker_jump-logker_max) +
(1-ep_fit['x_pJ'])*np.exp(logker_nojump-logker_max)
) + logker_max
log_weights = np.reshape(log_weights+loglike, (N)) - logker
return log_weights
class SVJMEPAvgKernel(SVJMPriorKernel):
def __init__(self, **kwargs):
self.approx_param=dict(mean=0, var=1, mean_J=0, var_J=1, pJ=0.5)
super().__init__(**kwargs)
return
def rv(self, x_t, **kwargs):
""" EP Mixture Approx Kernel for SVM
Sample x_{t+1} ~ q(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
Return:
x_next (ndarray): N by n, x_{t+1}
"""
pJ = self.parameters.pJ
x_next_mean = np.mean(x_t) * self.parameters.phi
x_next_var = np.var(x_t) * self.parameters.phi**2 + \
self.parameters.sigma2
x_nextJ_var = x_next_var + self.parameters.sigmaJ2
scaled_y2 = (self.parameters.Ltau2inv*self.y_next)**2
# Gauss Quadrature for EP
x_i = roots_hermitenorm(100)[0]
x_1 = x_i * np.sqrt(x_nextJ_var) + x_next_mean
x_2 = x_i * np.sqrt(x_next_var) + x_next_mean
log_w_1 = -0.5*(x_1 - x_next_mean)**2/x_nextJ_var + \
-0.5*np.log(2*np.pi*x_nextJ_var)
log_perturb_1 =-0.5*scaled_y2*np.exp(-x_1) + \
-0.5*x_1 -0.5*np.log(2*np.pi)
w_1 = np.exp(log_perturb_1 + log_w_1 - logsumexp(log_w_1))
x_z1 = np.sum(w_1)
x_bar1 = np.sum(x_1*w_1)/x_z1
x_var1 = np.sum(x_1**2*w_1)/x_z1 - x_bar1**2
log_w_2 = -0.5*(x_2 - x_next_mean)**2/x_next_var + \
-0.5*np.log(2*np.pi*x_next_var)
log_perturb_2 =-0.5*scaled_y2*np.exp(-x_2) + \
-0.5*x_2 -0.5*np.log(2*np.pi)
w_2 = np.exp(log_perturb_2 + log_w_2 - logsumexp(log_w_2))
x_z2 = np.sum(w_2)
x_bar2 = np.sum(x_2*w_2)/x_z2
x_var2 = np.sum(x_2**2*w_2)/x_z2 - x_bar2**2
x_pJ = pJ*x_z1/(pJ*x_z1 + (1-pJ)*x_z2)
self.approx_param['mean'] = x_bar2
self.approx_param['var'] = x_var2
self.approx_param['mean_J'] = x_bar1
self.approx_param['var_J'] = x_var1
self.approx_param['pJ'] = x_pJ
if x_var1 > x_var2:
nojump_ind = np.random.rand(x_t.size) > x_pJ
x_next = np.sqrt(x_var2)*np.random.normal(size=x_t.shape) + x_bar2
x_next += nojump_ind[:,np.newaxis]*(
np.random.normal(size=x_t.shape)*np.sqrt(x_var1-x_var2) +
x_bar1-x_bar2)
return x_next
else:
jump_ind = np.random.rand(x_t.size) < x_pJ
x_next = np.sqrt(x_var1)*np.random.normal(size=x_t.shape) + x_bar1
x_next += jump_ind[:,np.newaxis]*(
np.random.normal(size=x_t.shape)*np.sqrt(x_var2-x_var1) +
x_bar2-x_bar1)
return x_next
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for EP Approx Kernel for SVM
weight_t = p(y_{t+1}, x_{t+1} | x_t, parameters)/q(x_{t+1}, x_t)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
N = np.shape(x_next)[0]
# log P(y_t+1 | x_t+1)
scaled_y2 = (self.parameters.Ltau2inv*self.y_next)**2
log_weights = \
-0.5*np.log(2.0*np.pi) + \
-0.5*scaled_y2*np.exp(-x_next) + \
np.log(self.parameters.Ltau2inv) + \
-0.5*x_next
# log P(x_t+1 | x_t)
x_diff = x_next - self.parameters.phi*x_t
sigma2_nojump = self.parameters.sigma2
sigma2_jump = self.parameters.sigma2 + self.parameters.sigmaJ2
pJ = self.parameters.pJ
loglike_nojump = -0.5*(x_diff**2)/sigma2_nojump+\
-0.5*np.log(2.0*np.pi) - 0.5*np.log(sigma2_nojump)
loglike_jump = -0.5*(x_diff**2)/sigma2_jump+\
-0.5*np.log(2.0*np.pi) - 0.5*np.log(sigma2_jump)
loglike_max = np.max(np.array([loglike_jump, loglike_nojump]), axis=0)
loglike = np.log(
pJ*np.exp(loglike_jump-loglike_max) +
(1-pJ)*np.exp(loglike_nojump-loglike_max)
) + loglike_max
# log Q(x_t+1 | x_t)
logker_nojump = \
-0.5*((x_next-self.approx_param['mean'])**2)/self.approx_param['var'] + \
-0.5*np.log(2.0*np.pi) - 0.5*np.log(self.approx_param['var'])
logker_jump = \
-0.5*((x_next-self.approx_param['mean_J'])**2)/self.approx_param['var_J'] + \
-0.5*np.log(2.0*np.pi) - 0.5*np.log(self.approx_param['var_J'])
logker_max = np.max(np.array([logker_jump, logker_nojump]), axis=0)
logker = np.log(
self.approx_param['pJ']*np.exp(logker_jump-logker_max) +
(1-self.approx_param['pJ'])*np.exp(logker_nojump-logker_max)
) + logker_max
log_weights = np.reshape(log_weights+loglike-logker, (N))
return log_weights
| 14,606 | 36.841969 | 103 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/particle_filters/kernels.py | """
Kernels for particle filters
"""
import numpy as np
from scipy.special import expit, logsumexp
# Kernel
class Kernel(object):
def __init__(self, **kwargs):
self.parameters = kwargs.get('parameters', None)
self.y_next = kwargs.get('y_next', None)
return
def set_parameters(self, parameters):
self.parameters = parameters
return
def set_y_next(self, y_next):
self.y_next = y_next
return
def sample_x0(self, prior_mean, prior_var, N, n):
""" Initialize x_t
Returns:
x_t (N by n ndarray)
"""
raise NotImplementedError()
def rv(self, x_t, **kwargs):
""" Sample x_{t+1} ~ K(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
parameters (dict): parameters
Return:
x_next (ndarray): N by n, x_{t+1}
"""
raise NotImplementedError()
def reweight(self, x_t, x_next, **kwargs):
""" Reweight function for Kernel
weight_t = Pr(y_{t+1}, x_{t+1} | x_t, parameters) /
K(x_{t+1} | x_t, parameters)
Args:
x_t (ndarray): N by n, x_t
x_next (ndarray): N by n, x_{t+1}
Return:
log_weights (ndarray): N, importance weights
"""
raise NotImplementedError()
def log_density(self, x_t, x_next, **kwargs):
""" Density of kernel K(x_{t+1} | x_t, parameters)
Args:
x_t (N by n ndarray): x_t
x_next (N by n ndarray): x_{t+1}
Returns:
loglikelihoods (N ndarray): K(x_next | x_t, parameters)
(ignores constants with respect to x_t & x_next)
"""
raise NotImplementedError()
def get_prior_log_density_max(self):
""" Upper bound for prior log density """
raise NotImplementedError()
def ancestor_log_weights(self, particles, log_weights):
""" Weights for ancestor sampling
Default is log_weights
"""
return log_weights
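# Hedged illustration of the Kernel interface above: a minimal Gaussian
# random-walk bootstrap kernel with a standard normal observation model.
# Both modeling choices are assumptions local to this example.
class _RandomWalkKernelDemo(Kernel):
    def rv(self, x_t, **kwargs):
        return x_t + np.random.normal(size=np.shape(x_t))
    def reweight(self, x_t, x_next, **kwargs):
        # Bootstrap proposal: weights reduce to the observation loglikelihood
        diff = self.y_next - x_next
        return np.reshape(-0.5*diff**2 - 0.5*np.log(2.0*np.pi),
                          (np.shape(x_t)[0],))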
# LatentGaussianKernel
class LatentGaussianKernel(Kernel):
def sample_x0(self, prior_mean, prior_var, N, n=1):
""" Initialize x_t
Returns:
x_t (N by n ndarray)
"""
if n == 1:
x_t = np.random.normal(
loc=prior_mean,
scale=np.sqrt(prior_var),
size=(N, n))
else:
x_t = np.random.multivariate_normal(
mean=prior_mean,
cov=prior_var,
size=N,
)
return x_t
def prior_log_density(self, x_t, x_next, **kwargs):
""" log density of prior kernel
Args:
x_t (N by n ndarray): x_t
x_next (N by n ndarray): x_{t+1}
Returns:
loglikelihoods (N ndarray): q(x_next | x_t, parameters)
            (ignores constants with respect to x_t & x_next)
"""
N = np.shape(x_t)[0]
if (len(np.shape(x_t)) > 1) and (np.shape(x_t)[1] > 1):
# x is vector
diff = x_next.T - np.dot(self.parameters.A, x_t.T)
loglikelihoods = -0.5*np.sum(diff*np.dot(
self.parameters.Qinv, diff), axis=0) - \
0.5*np.shape(x_t)[1] * np.log(2.0*np.pi) + \
np.sum(np.log(np.diag(self.parameters.LQinv)))
else:
# n = 1, x is scalar
diff = x_next - self.parameters.A*x_t
loglikelihoods = -0.5*(diff**2)*self.parameters.Qinv + \
-0.5*np.log(2.0*np.pi) + np.log(self.parameters.LQinv)
loglikelihoods = np.reshape(loglikelihoods, (N))
return loglikelihoods
def get_prior_log_density_max(self):
""" Return max value of log density based on current parameters
Returns max_{x,x'} log q(x | x', parameters)
"""
LQinv = self.parameters.LQinv
n = np.shape(LQinv)[0]
loglikelihood_max = -0.5*n*np.log(2.0*np.pi) + \
np.sum(np.log(np.diag(LQinv)))
return loglikelihood_max
| 4,159 | 27.888889 | 74 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/particle_filters/buffered_smoother.py | """
Wrapper Helper Class around PaRIS
"""
import numpy as np
import functools
from .pf import (
paris_smoother, nemeth_smoother, poyiadjis_smoother,
pf_filter,
log_normalize,
)
def pf_wrapper(
observations, parameters, N,
kernel, smoother,
additive_statistic_func, statistic_dim,
t1=0, tL=None, weights=None,
prior_mean = 0.0, prior_var = 1.0,
tqdm = None, tqdm_name = None,
save_all=False, elementwise_statistic=False,
**kwargs):
""" Wrapper around particle smoothers for calculating additive_statistics
Args:
observations (ndarray): observed data
parameters (Parameters): parameters for kernel + additive_statistic_func
N (int): number of smoothed particles
kernel (Kernel): kernel with proposal, reweight, prior_log_density funcs
smoother (func): one of the pf methods in `pf.py`
additive_statistic_func (func): additive statistic func
statistic_dim (int): dimension of additive_statistic_func return array
t1 (int): relative start of left buffer
tL (int): relative end of right buffer (exclusive, buffer is [t1, tL-1])
weights (ndarray): weights for the additive statistics over [t1, tL)
prior_mean (ndarray): prior mean for latent variable
prior_var (ndarray): prior var for latent variable
tqdm (optional): progress bar
tqdm_name (string): message for progress bar
save_all (bool): whether to save intermediate output
elementwise_statistic (bool): whether return elementwise statistic
**kwargs (dict): additional args for smoother
Returns:
out (dict): with outputs
x_t (N by n ndarray): final smoothed/filtered latent variables
log_weights (N ndarray): weights for latent variables
statistics (N by statistic_dim ndarray): final smoothed statistics
(or (statistic_dim ndarray) if smoother is pf_filter)
loglikelihood_estimate (float): loglikelihood estimate
"""
T, m = np.shape(observations)
if tL is None:
tL = T
n = getattr(parameters, 'n', 1)
kernel.set_parameters(parameters=parameters)
if elementwise_statistic:
statistic_dim = (tL - t1) * statistic_dim
# Initialize PF
x_t = kernel.sample_x0(prior_mean=prior_mean, prior_var=prior_var,
N=N, n=n)
log_weights = np.zeros(N)
loglikelihood_estimate = 0.0
if kwargs.get('is_filter', False):
statistics = np.zeros((statistic_dim))
else:
statistics = np.zeros((N, statistic_dim))
def zero_statistics(x_t, x_next, **kwargs):
Ntilde = np.shape(x_t)[0]
return np.zeros((Ntilde, statistic_dim))
if save_all:
# Bookkeeping
all_x_t = [x_t]
all_log_weights = [log_weights]
all_statistics = [statistics]
all_loglikelihood_estimate = [loglikelihood_estimate]
pbar = range(T)
if tqdm is not None:
pbar = tqdm(pbar, leave=False)
if tqdm_name is not None:
pbar.set_description('PF: {0}'.format(tqdm_name))
for t in pbar:
kernel.set_y_next(y_next=observations[t])
# Only Sum over terms not in the buffer
if t < t1 or t >= tL:
weight_t = 1.0
additive_statistic_func_t = zero_statistics
else:
weight_t = 1.0 if weights is None else weights[t-t1]
additive_statistic_func_t = functools.partial(
additive_statistic_func,
y_next = observations[t],
t = t,
parameters=parameters)
if elementwise_statistic:
# Adjust statistic function if elementwise
additive_statistic_func_t = elementwise_statistic_wrapper(
additive_statistic_func_t,
shift=t-t1,
length=tL-t1,
)
# Run one step of the PF/Smoother
x_t, log_weights, statistics = smoother(
x_t, log_weights, statistics,
additive_statistic_func=additive_statistic_func_t,
kernel=kernel,
additive_scale=weight_t,
**kwargs,
)
# Update Loglikelihood estimate
if (t >= t1) and (t < tL):
loglikelihood_estimate += \
weight_t * np.log(np.mean(np.exp(log_weights)))
if save_all:
# Bookkeeping
all_x_t.append(x_t)
all_log_weights.append(log_weights)
all_statistics.append(statistics)
all_loglikelihood_estimate.append(loglikelihood_estimate)
# Final Output
out = {}
if save_all:
# Bookkeeping
out['all_x_t'] = np.array(all_x_t)
out['all_log_weights'] = np.array(all_log_weights)
out['all_statistics'] = np.array(all_statistics)
out['all_loglikelihood_estimate'] = np.array(all_loglikelihood_estimate)
out['x_t'] = x_t
out['log_weights'] = log_weights
out['statistics'] = statistics
out['loglikelihood_estimate'] = loglikelihood_estimate
return out
def average_statistic(out):
mean_statistic = np.sum(
out['statistics'].T * log_normalize(out['log_weights']), axis=1)
return mean_statistic
def buffered_pf_wrapper(pf, **kwargs):
""" Wrapper for buffered pf wrappers
Args:
pf (string)
"nemeth" - use Nemeth et al. O(N)
"poyiadjis_N" - use Poyiadjis et al. O(N)
"poyiadjis_N2" - use Poyiadjis et al. O(N^2)
"paris" - use PaRIS Olsson + Westborn O(N log N)
"filter" - just use PF (no smoothing)
**kwargs
Returns:
out (dict)
"""
if pf == "nemeth":
tqdm_name = kwargs.pop('tqdm_name', 'Nemeth O(N)')
smoother = nemeth_smoother
out = pf_wrapper(smoother=smoother, tqdm_name=tqdm_name, **kwargs)
elif pf == "poyiadjis_N":
tqdm_name = kwargs.pop('tqdm_name', 'Poyiadjis O(N)')
lambduh = 1.0
smoother = nemeth_smoother
out = pf_wrapper(smoother=smoother, tqdm_name=tqdm_name,
lambduh=lambduh, **kwargs)
elif pf == "poyiadjis_N2":
tqdm_name = kwargs.pop('tqdm_name', 'Poyiadjis O(N^2)')
smoother = poyiadjis_smoother
out = pf_wrapper(smoother=smoother, tqdm_name=tqdm_name, **kwargs)
elif pf == "paris":
tqdm_name = kwargs.pop('tqdm_name', 'PaRIS')
smoother = paris_smoother
out = pf_wrapper(smoother=smoother, tqdm_name=tqdm_name, **kwargs)
elif pf == "filter":
tqdm_name = kwargs.pop('tqdm_name', 'Particle Filter')
smoother = pf_filter
kwargs['is_filter'] = True
out = pf_wrapper(smoother=smoother, tqdm_name=tqdm_name, **kwargs)
else:
raise ValueError("Unrecognized pf = {0}".format(pf))
return out
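# Hedged usage sketch (argument list abbreviated; any names not defined in
# this module are assumptions): with observations `ys` (T by m) and a
# `kernel`/`additive_statistic_func` set up as documented in pf_wrapper,
# one could run the PaRIS smoother and average its output, e.g.
#
#   out = buffered_pf_wrapper(pf="paris", observations=ys, kernel=kernel,
#           additive_statistic_func=additive_statistic_func,
#           statistic_dim=statistic_dim, N=1000, parameters=parameters)
#   h_bar = average_statistic(out)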
def elementwise_statistic_wrapper(
additive_statistic_func, shift, length):
def shifted_statistic_func(**kwargs):
statistic = additive_statistic_func(**kwargs)
N, statistic_dim = np.shape(statistic)
shifted_statistic = np.zeros((N, statistic_dim*length))
shifted_statistic[:,shift*statistic_dim:(shift+1)*statistic_dim] = \
statistic
return shifted_statistic
return shifted_statistic_func
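# Minimal illustration of elementwise_statistic_wrapper (added for
# exposition; not part of the original source). A statistic of dimension d
# is embedded at time offset `shift` into a zero block-vector of dimension
# d*length, so per-time-point statistics accumulate side by side:
#
#   base_func = lambda **kwargs: np.ones((5, 2))   # N=5 particles, d=2
#   f = elementwise_statistic_wrapper(base_func, shift=1, length=3)
#   f().shape   # -> (5, 6), with columns 2:4 equal to 1 and the rest 0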
| 7,476 | 34.268868 | 80 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/particle_filters/__init__.py | 0 | 0 | 0 | py | |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/particle_filters/pf.py | """
Bootstrap Particle Filter
"""
import numpy as np
import functools
def pf(particles, log_weights, kernel):
""" Algorithm 1 of PaRIS
{particles_t, weights_t} -> {particles_{t+1}, weights_{t+1}}
Args:
particles (ndarray): N by n, latent states (xi_t)
log_weights (ndarray): N, log importance weights (log omega_t)
kernel (Kernel): kernel with functions
rv (func): proposal function for new particles
reweight (func): reweighting function for new particles
Returns:
new_particles (ndarray): N by n, latent states (xi_{t+1})
new_log_weights (ndarray): N, log importance weights (log omega_{t+1})
        ancestor_indices (ndarray): N, ancestor indices (I)
"""
N = np.shape(particles)[0]
# Multinomial Resampling
ancestor_log_weights = kernel.ancestor_log_weights(particles, log_weights)
ancestor_indices = np.random.choice(range(N),
size=N, replace=True, p=log_normalize(ancestor_log_weights))
sampled_particles = particles[ancestor_indices]
# Propose Descendents
new_particles = kernel.rv(sampled_particles)
# Weight Descendents
new_log_weights = kernel.reweight(sampled_particles, new_particles)
return new_particles, new_log_weights, ancestor_indices
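# Hedged sketch of the kernel interface assumed by `pf` and the smoothers
# below (an illustrative stub added for exposition, not part of the original
# source): a Gaussian random-walk transition with Gaussian emissions, where
# the observation and latent dimensions are assumed equal. Log densities are
# kept only up to additive constants, which suffices for the self-normalized
# weights used throughout this module. Method names follow the calls made in
# this file and in pf_wrapper; constructor arguments are assumptions.
class ExampleRandomWalkKernel(object):
    def __init__(self, tau=1.0, sigma=1.0):
        self.tau = tau        # transition (random-walk) scale
        self.sigma = sigma    # emission noise scale
        self.y_next = None
    def set_parameters(self, parameters=None):
        pass                  # this toy kernel has fixed parameters
    def set_y_next(self, y_next):
        self.y_next = y_next
    def sample_x0(self, prior_mean, prior_var, N, n):
        return prior_mean + np.sqrt(prior_var) * np.random.randn(N, n)
    def ancestor_log_weights(self, particles, log_weights):
        return log_weights    # plain multinomial resampling
    def rv(self, particles):
        # Propose descendants from the bootstrap (prior) proposal
        return particles + self.tau * np.random.randn(*np.shape(particles))
    def reweight(self, particles, new_particles):
        # log N(y_next | x_{t+1}, sigma^2 I), up to an additive constant
        return -0.5 * np.sum(
            (self.y_next - new_particles)**2, axis=1) / self.sigma**2
    def prior_log_density(self, x_t, x_next):
        # log K(x_t, x_{t+1}), up to an additive constant
        return -0.5 * np.sum((x_next - x_t)**2, axis=1) / self.tau**2
    def get_prior_log_density_max(self):
        return 0.0            # max of the unnormalized log density above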
def pf_filter(particles, log_weights, statistics, additive_statistic_func, kernel, **kwargs):
""" Calculate SUM[ E[h(x_t, x_{t+1}) | Y_{<=t+1}] ]
Args:
particles (ndarray): N by n, latent states (x_t)
log_weights (ndarray): N, log importance weights (log w_t)
statistics (ndarray): h, estimated aux statistics (m_t)
additive_statistic_func (func): h_t(x_t, x_{t+1})
function of x_t, x_{t+1}, return h_t(x_t, x_{t+1})
kernel (Kernel): aux kernel for pf
rv (func): proposal function for new particles
reweight (func): reweighting function for new particles
log_density (func): return log_density of K(xi_t, xi_{t+1})
Return:
new_particles (ndarray): N by n, latent states (x_{t+1})
new_log_weights (ndarray): N, log importance weights (log w_{t+1})
new_statistics (ndarray): h, estimated aux statistics (m_{t+1})
"""
N = np.shape(particles)[0]
new_particles, new_log_weights, ancestor_indices = pf(
particles=particles, log_weights=log_weights,
kernel=kernel,
)
additive_statistic = \
additive_statistic_func(
x_t=particles[ancestor_indices],
x_next=new_particles,
)
additive_statistic *= kwargs.get('additive_scale', 1.0)
    if kwargs.get('logsumexp', False):
        # Numerically stable weighted average, computed per statistic
        # dimension via the log-sum-exp trick (note the axis=1 reduction,
        # matching the branch below)
        max_add_stat = np.max(additive_statistic.T, axis=1)
        new_statistics = statistics + max_add_stat + \
                np.log(np.sum(
                    np.exp(additive_statistic.T - max_add_stat[:, np.newaxis]) *
                    log_normalize(new_log_weights), axis=1))
    else:
        new_statistics = statistics + \
                np.sum(additive_statistic.T * log_normalize(new_log_weights),
                    axis=1)
return new_particles, new_log_weights, new_statistics
def poyiadjis_smoother(particles, log_weights, statistics,
additive_statistic_func, kernel,
**kwargs):
""" Algorithm 2 of Poyiadjis et al. Biometrika (2011) O(N^2) algorithm
Calculate E[SUM[h(x_t, x_{t+1})] | Y]
Args:
particles (ndarray): N by n, latent states (x_t)
log_weights (ndarray): N, log importance weights (log w_t)
statistics (ndarray): N by h, estimated aux statistics (m_t)
additive_statistic_func (func): h_t(x_t, x_{t+1})
function of x_t, x_{t+1}, return h_t(x_t, x_{t+1})
kernel (Kernel): aux kernel for pf
rv (func): proposal function for new particles
reweight (func): reweighting function for new particles
log_density (func): return log_density of K(xi_t, xi_{t+1})
Return:
new_particles (ndarray): N by n, latent states (x_{t+1})
new_log_weights (ndarray): N, log importance weights (log w_{t+1})
new_statistics (ndarray): N by h, estimated aux statistics (m_{t+1})
"""
N = np.shape(particles)[0]
new_particles, new_log_weights, ancestor_indices = pf(
particles=particles, log_weights=log_weights,
kernel=kernel,
)
backward_weights = np.zeros((N, N)) # Weights for Eq. (20)
for i, x_next in enumerate(new_particles):
child_loglikelihood = kernel.prior_log_density(
particles, np.outer(np.ones(N), x_next),
)
# J_{t+1}
backward_weights[i] = log_normalize(log_weights + child_loglikelihood)
indices = np.array([ii for _ in range(N) for ii in range(N)]) # 0,1,2,..., 0,1,2,...,0,1,2,...
new_indices = np.array([ii for ii in range(N) for _ in range(N)]) # 0,0,0,...,1,1,1,...,2,2,2,...
additive_statistic = additive_statistic_func(
x_t=particles[indices],
x_next=new_particles[new_indices],
)
additive_statistic *= kwargs.get('additive_scale', 1.0)
new_statistics = np.reshape(
statistics[indices] + additive_statistic,
(N, N, -1)
)
new_statistics = np.einsum('ijk,ij->ik', new_statistics, backward_weights)
return new_particles, new_log_weights, new_statistics
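# The update above implements, for each new particle i,
#   tau_{t+1}^i = sum_j J_{t+1}[i, j] * (tau_t^j + h_t(x_t^j, x_{t+1}^i)),
# where J_{t+1}[i, j] is proportional to w_t^j * K(x_t^j, x_{t+1}^i),
# normalized over j (the backward_weights computed above). Building J costs
# O(N^2) kernel evaluations per step, which is what the O(N) Nemeth and
# O(N log N) PaRIS variants below avoid.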
def nemeth_smoother(particles, log_weights, statistics,
additive_statistic_func, kernel,
lambduh = 0.95, **kwargs):
""" Algorithm 2 of Nemeth et al. (2015)
Calculate E[SUM[h(x_t, x_{t+1})] | Y]
Args:
particles (ndarray): N by n, latent states (x_t)
log_weights (ndarray): N, log importance weights (log w_t)
statistics (ndarray): N by h, estimated aux statistics (m_t)
additive_statistic_func (func): h_t(x_t, x_{t+1})
function of x_t, x_{t+1}, return h_t(x_t, x_{t+1})
kernel (Kernel): aux kernel for pf
lambduh (double, optional): shrinkage parameter
Return:
new_particles (ndarray): N by n, latent states (x_{t+1})
new_log_weights (ndarray): N, log importance weights (log w_{t+1})
new_statistics (ndarray): N by h, estimated aux statistics (m_{t+1})
"""
N = np.shape(particles)[0]
S = np.sum(statistics.T * log_normalize(log_weights), axis=1)
new_particles, new_log_weights, ancestor_indices = pf(
particles=particles, log_weights=log_weights,
kernel=kernel,
)
additive_statistic = \
additive_statistic_func(
x_t=particles[ancestor_indices],
x_next=new_particles,
)
additive_statistic *= kwargs.get('additive_scale', 1.0)
new_statistics = (
lambduh * statistics[ancestor_indices] +
(1.0-lambduh) * np.outer(np.ones(N), S) +
additive_statistic
)
return new_particles, new_log_weights, new_statistics
def paris_smoother(particles, log_weights, statistics,
additive_statistic_func, kernel,
Ntilde=2, accept_reject=True,
max_accept_reject=None, manual_sample_threshold=None,
**kwargs):
""" Algorithm 2 of PaRIS
Calculate E[SUM[h(x_t, x_{t+1})] | Y]
Args:
particles (ndarray): N by n, latent states (xi_t)
log_weights (ndarray): N, log importance weights (log omega_t)
statistics (ndarray): N by h, estimated aux statistics (tau_t)
additive_statistic_func (func): h_t(xi_t, xi_{t+1})
function of xi_t, xi_{t+1}, return h_t(xi_t, xi_{t+1})
kernel (Kernel): kernel with functions
rv (func): proposal function for new particles
reweight (func): reweighting function for new particles
log_density (func): return log_density of K(xi_t, xi_{t+1})
Ntilde (int, optional): precision parameter
Return:
new_particles (ndarray): N by n, latent states (xi_{t+1})
new_log_weights (ndarray): N, log importance weights (log omega_{t+1})
new_statistics (ndarray): N by h, estimated aux statistics (tau_{t+1})
"""
N = np.shape(particles)[0]
new_particles, new_log_weights, _ = pf(
particles=particles, log_weights=log_weights,
kernel=kernel,
)
if accept_reject:
# Accept-Reject O(NK) Implementation
rewired_ancestor_indices = accept_reject_based_backward_sampling(
particles, log_weights, new_particles, kernel, Ntilde,
max_accept_reject=max_accept_reject,
manual_sample_threshold=manual_sample_threshold,
)
else:
# Naive O(N^2) Implementation
rewired_ancestor_indices = [None] * N
for i, xi_next in enumerate(new_particles):
child_loglikelihood = kernel.prior_log_density(
particles, np.outer(np.ones(N), xi_next),
)
# J_{t+1}
rewired_ancestor_indices[i] = np.random.choice(
range(N), size = Ntilde, replace = True,
p=log_normalize(log_weights + child_loglikelihood))
rewired_ancestor_indices = np.array(rewired_ancestor_indices)
# Vectorized Update of Additive Statistics
rewired_statistics = statistics[rewired_ancestor_indices.flatten()]
rewired_parents = particles[rewired_ancestor_indices.flatten()]
indices = np.array([ii for ii in range(N) for _ in range(Ntilde)])
xi_next = new_particles[indices]
additive_statistic = additive_statistic_func(
x_t=rewired_parents,
x_next=xi_next,
)
additive_statistic *= kwargs.get('additive_scale', 1.0)
new_statistics = np.reshape(
rewired_statistics + additive_statistic,
(N, Ntilde, -1)
)
new_statistics = np.mean(new_statistics, axis=1)
return new_particles, new_log_weights, new_statistics
def accept_reject_based_backward_sampling(particles, log_weights, new_particles,
kernel, Ntilde, max_accept_reject=None, manual_sample_threshold=None):
""" Algorithm 3 of PaRIS to sample J (rewired ancestor indices)
Args:
particles (ndarray): N by n, latent states (xi_t)
log_weights (ndarray): N, log importance weights (log omega_t)
new_particles (ndarray): N by n, latent states (xi_{t+1})
kernel (Kernel): kernel
Ntilde (int): precision parameter
max_accept_reject (int, optional): number of accept_reject tries
(default is 100*log10(N/10))
manual_sample_threshold (int, optional):
threshold number of samples to manually sample
early terminates accept_reject
(default is 10*log10(N/10))
Returns:
rewired_ancestor_indices (ndarray): N by Ntilde, indices J
"""
N = np.shape(particles)[0]
weights = log_normalize(log_weights)
loglikelihood_max = kernel.get_prior_log_density_max()
if max_accept_reject is None:
max_accept_reject = int(100*np.log10(N/10))
if manual_sample_threshold is None:
manual_sample_threshold = int(10*np.log10(N/10))
# J
rewired_ancestor_indices = np.zeros((N, Ntilde), dtype=int)
for j in range(Ntilde):
L = [ii for ii in range(N)]
converged = False
for _ in range(max_accept_reject):
size_L = len(L)
new_L = []
# Exit when L is empty
if size_L == 0:
converged = True
break
# Early terminate to manual resample when L is small
if size_L <= manual_sample_threshold:
break
# Draw I
indices = np.random.choice(
range(N), size=size_L, replace=True, p=weights,
)
# Draw U
uniforms = np.random.rand((size_L))
# Calculate q(xi^I, xi^L)
child_loglikelihood = kernel.prior_log_density(
particles[indices], new_particles[L])
threshold = np.exp(child_loglikelihood-loglikelihood_max)
for k in range(size_L):
if uniforms[k] <= threshold[k]:
# Set J[L[k], j] = I[k]
rewired_ancestor_indices[L[k], j] = indices[k]
else:
new_L.append(L[k])
L = new_L
#print("Not Converged {0} of {1} after {2} of {3} steps".format(len(L), N, _, max_accept_reject))
if not converged:
# Manually Sample remaining i
#print("Manually Sampling {0} of {1} after {2} of {3} steps".format(len(L), N, _, max_accept_reject))
for i in L:
child_loglikelihood = kernel.prior_log_density(
particles,
np.outer(np.ones(N), new_particles[i]),
)
# J_{t+1}
rewired_ancestor_indices[i,j] = np.random.choice(
range(N), size = 1, replace = True,
p=log_normalize(log_weights + child_loglikelihood))
return rewired_ancestor_indices
def efficient_multinomial_sampling(num_samples, prob_weight):
    """ Efficient multinomial sampling of num_samples from N categories
    Takes O(n + n log(1 + N/n)) time, where n = num_samples
    Algorithm 2 in Appendix B.1 of https://arxiv.org/pdf/1202.2945.pdf
    Note: I did not find this faster than numpy's np.random.choice
"""
N = len(prob_weight)
indices = np.zeros((num_samples), dtype=int) # I
cum_weights = np.cumsum(prob_weight) # q
ordered_U = np.random.rand(num_samples) # U
ordered_U.sort()
l, r = 0, 1
for k in range(num_samples):
d = 1
while ordered_U[k] > cum_weights[r-1]:
l = r
r = min([r+2**d, N])
d = d+1
while r-l > 1:
m = int(np.floor((r+l)/2))
if ordered_U[k] > cum_weights[m-1]:
l = m
else:
r = m
indices[k] = r-1
return np.random.permutation(indices)
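# Hedged sanity-check sketch (an assumption added for exposition): both
# samplers draw from the same multinomial distribution, e.g.
#
#   w = log_normalize(np.random.randn(1000))
#   idx_a = np.random.choice(1000, size=100, p=w)
#   idx_b = efficient_multinomial_sampling(100, w)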
def log_normalize(log_weights):
    """ Exponentiate and normalize log weights into a probability vector.
    Subtracts the max log weight before exponentiating for numerical
    stability (the standard log-sum-exp trick).
    """
    probs = np.exp(log_weights-np.max(log_weights))
    probs /= np.sum(probs)
    return probs
| 14,308 | 36.360313 | 121 | py |
Retrieving-Top-Weighted-Triangles-in-Graphs | Retrieving-Top-Weighted-Triangles-in-Graphs-master/src/print_table.py | import os
import numpy as np
files = [
'tags-stack-overflow',
'threads-stack-overflow',
'wikipedia',
'eth',
'aminer',
'temporal-reddit-reply',
'MAG',
'spotify'
]
datasets = [
'tags-stack-overflow',
'threads-stack-overflow',
'Wikipedia-clickstream',
'Ethereum',
'Aminer',
'reddit-reply',
'MAG',
'Spotify'
]
l_sort = 7
l_shl_alg = 28
l_dhl_pre = 33
l_dhl_alg = 37
l_auto_pre = 42
l_auto_alg = 46
l_brute = 53
l_shl_acc = 60
spot_shl_acc = 53
edge_sort = 7
for k in [1000, 100000]:
for (fname, dataset) in zip(files, datasets):
brute_avg, shl_avg, dhl_avg, auto_avg = 0, 0, 0, 0
edge_time_avg, edge_acc_avg = 0, 0
wedge_time_avg = 0
for run in range(1,11):
# Deterministic stuff.
path = os.path.join('../output/compare_deterministic_{}_{}'.format(k, run), fname)
with open(path, 'r') as f:
lines = f.readlines()
sort_time = float(lines[l_sort].split()[-1])
shl_alg_time = float(lines[l_shl_alg].split()[-1])
dhl_pre_time = float(lines[l_dhl_pre].split()[-1])
dhl_alg_time = float(lines[l_dhl_alg].split()[-1])
auto_pre_time = float(lines[l_auto_pre].split()[-1])
auto_alg_time = float(lines[l_auto_alg].split()[-1])
if fname != 'spotify':
shl_acc = float(lines[l_shl_acc].split()[-1])
brute_time = float(lines[l_brute].split()[-1])
else:
shl_acc = float(lines[spot_shl_acc].split()[-1])
brute_time = '>86400'
shl_time = sort_time + shl_alg_time
dhl_time = sort_time + dhl_pre_time + dhl_alg_time
auto_time = sort_time + auto_pre_time + auto_alg_time
if fname != 'spotify':
brute_avg += brute_time
else:
brute_avg = brute_time
shl_avg += shl_time
dhl_avg += dhl_time
auto_avg += auto_time
# Edge sampling stuff.
path = os.path.join('../output/compare_parallel_edge_{}_{}'.format(k, run), fname)
with open(path, 'r') as f:
lines = f.readlines()
pre_times, times, accs = [], [], []
edge_sort_time = float(lines[edge_sort].split()[-1])
for line in lines:
if 'Pre-processing' in line:
pre_times.append(float(line.split()[-1]))
if 'Total Time' in line:
times.append(float(line.split()[-1]))
if 'Accuracy' in line:
accs.append(float(line.split()[-1]))
times = times[:-1] # don't need time taken by adaptive heavy light
tot_times = [edge_sort_time + p + t for (p,t) in zip(pre_times, times)]
if k == 1000:
thresh = 0.98
else:
thresh = 0.49
flag = False
for (i, (acc, t)) in enumerate(zip(accs, tot_times)):
if fname == 'wikipedia' and i == 0:
continue
if acc > thresh:
edge_acc_avg += acc
edge_time_avg += t
flag = True
break
            if not flag:
                # No run cleared the threshold; fall back to the best-accuracy
                # run, using its total (sort + pre-processing + sampling) time
                # for consistency with the loop above
                edge_time_avg += tot_times[np.argmax(accs)]
            # Wedge sampling stuff (only run for k = 1000 on the datasets
            # listed below).
            wedge_datasets = ['MAG', 'tags-stack-overflow',
                              'temporal-reddit-reply', 'spotify']
            if k != 100000 and fname in wedge_datasets:
                if fname != 'spotify':
                    path = os.path.join('../output/compare_parallel_wedge_{}_{}'.format(k, run), fname)
                    with open(path, 'r') as f:
                        lines = f.readlines()
                    pre_times, times, accs = [], [], []
                    wedge_sort_time = float(lines[edge_sort].split()[-1])
                    for line in lines:
                        if 'Pre-processing' in line:
                            pre_times.append(float(line.split()[-1]))
                        if 'Total Time' in line:
                            times.append(float(line.split()[-1]))
                        if 'Accuracy' in line:
                            accs.append(float(line.split()[-1]))
                    times = times[:-1] # don't need time taken by adaptive heavy light
                    tot_times = [wedge_sort_time + p + t for (p,t) in zip(pre_times, times)]
                    thresh = 0.49
                    flag = False
                    for (i, (acc, t)) in enumerate(zip(accs, tot_times)):
                        if fname == 'MAG' and i == 0:
                            continue
                        if acc > thresh:
                            wedge_time_avg += t
                            flag = True
                            break
                    if not flag:
                        # Same fallback as the edge-sampling case above
                        wedge_time_avg += tot_times[np.argmax(accs)]
                else:
                    wedge_time_avg = 0
if fname != 'spotify':
brute_avg /= 10
shl_avg /= 10
dhl_avg /= 10
auto_avg /= 10
edge_acc_avg /= 10
edge_time_avg /= 10
wedge_time_avg /= 10
row = '& {:>25}'.format(dataset)
if fname != 'spotify':
row += ' & {:10.2f}'.format(brute_avg)
else:
row += ' & {:>10}'.format(brute_time)
row += ' & {:8.2f}'.format(edge_time_avg)
if k == 100000 or fname not in ['MAG', 'tags-stack-overflow', 'temporal-reddit-reply', 'spotify']:
if fname != 'spotify':
tmp = '{:.2f}'.format(brute_avg)
row += ' & {:>8}'.format('>{}'.format(tmp))
else:
row += ' & {:8.2f}'.format(0)
else:
row += ' & {:8.2f}'.format(wedge_time_avg)
row += ' & {:7.2f} & {:7.2f} & {:7.2f} & {:7.2f} \\\\'.format(
dhl_avg, auto_avg, shl_avg, shl_acc
)
print(row)
print('\n') | 6,198 | 35.251462 | 107 | py |
Retrieving-Top-Weighted-Triangles-in-Graphs | Retrieving-Top-Weighted-Triangles-in-Graphs-master/src/plot_static_hl_tradeoff.py | import matplotlib.pyplot as plt
import numpy as np
def make_plot(thresholds, accs, times, k, dataset):
thresholds = [0] + thresholds
times = [0] + times
accs = [0] + accs
fig, ax = plt.subplots()
# plt.subplot(2, 1, 1)
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
for y in np.arange(0, 1, 0.2):
plt.plot(range(0, 100), [y] * len(range(0, 100)), "--", lw=0.5, color="black", alpha=0.3)
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
plt.ylim(0, accs[-1])
plt.xlim(0, thresholds[-1])
plt.plot(thresholds, accs, label='accuracy', color='green')
plt.ylabel('accuracy', fontsize=24)
plt.xlabel('percentage of edges labelled heavy', fontsize=24)
fig.savefig('../figs/static_hl_tradeoff_accuracy_{}_{}.pdf'.format(dataset, k), bbox_inches='tight')
plt.close()
fig, ax = plt.subplots()
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
if dataset == 'eth':
step = 20
else:
step = 2.5
for y in np.arange(0, int(times[-1])+1, step):
plt.plot(range(0, 100), [y] * len(range(0, 100)), "--", lw=0.5, color="black", alpha=0.3)
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
plt.ylim(0, times[-1])
plt.xlim(0, thresholds[-1])
plt.plot(thresholds, times, label='time (s)', color='red')
plt.ylabel('time (seconds)', fontsize=24)
plt.xlabel('percentage of edges labelled heavy', fontsize=24)
fig.savefig('../figs/static_hl_tradeoff_time_{}_{}.pdf'.format(dataset, k), bbox_inches='tight')
plt.close()
def get_info_and_plot(dataset, lines):
sort_time = float(lines[7].split()[-1])
idx = -1
kvals = [25, 1000, 40000]
times, accs = [], []
for line in lines:
if 'brute' in line:
idx += 1
k = kvals[idx]
times, accs = [], []
if 'Total Time' in line:
times.append(float(line.split()[-1])+sort_time)
if 'Accuracy' in line:
accs.append(float(line.split()[-1]))
if len(accs) == len(thresholds):
make_plot(thresholds, accs, times[1:], k, dataset)
times, accs = [], []
thresholds = list(range(5, 105, 5))
with open('../output/static_hl') as f:
lines = f.readlines()
get_info_and_plot('eth', lines[1055:]) | 2,327 | 32.73913 | 102 | py |
Retrieving-Top-Weighted-Triangles-in-Graphs | Retrieving-Top-Weighted-Triangles-in-Graphs-master/src/plot_edge_weights.py | import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import powerlaw
from scipy.stats import binned_statistic
def drop_zeros(a_list):
return [i for i in a_list if i>0]
# https://stackoverflow.com/questions/16489655/plotting-log-binned-network-degree-distributions
def log_binning(counter_dict,bin_count=35):
max_x = np.log10(max(counter_dict.keys()))
max_y = np.log10(max(counter_dict.values()))
max_base = max([max_x,max_y])
min_x = np.log10(min(drop_zeros(counter_dict.keys())))
bins = np.logspace(min_x,max_base,num=bin_count)
# Based off of: http://stackoverflow.com/questions/6163334/binning-data-in-python-with-scipy-numpy
# bin_means_y = (np.histogram(list(counter_dict.keys()),bins,weights=list(counter_dict.values()))[0] / np.histogram(list(counter_dict.keys()),bins)[0])
# bin_means_x = (np.histogram(list(counter_dict.keys()),bins,weights=list(counter_dict.keys()))[0] / np.histogram(list(counter_dict.keys()),bins)[0])
bin_means = binned_statistic(list(counter_dict.keys()),
[list(counter_dict.keys()), list(counter_dict.values())],
bins=bins)[0]
return bin_means
def fit_powerlaw(x, y, xmin):
l = [([xx]*yy) for (xx, yy) in zip(x, y)]
l = [item for sublist in l for item in sublist]
dist = powerlaw.Fit(l, xmin=xmin, discrete=True)
return dist
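# The list expansion above reconstitutes raw samples from histogram data so
# that `powerlaw.Fit` can consume them: e.g. x = [1, 2], y = [3, 1] becomes
# [1, 1, 1, 2]. (A worked illustration added here; not part of the original
# script.)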
datasets = ['tags-stack-overflow', 'wikipedia']
for dataset in datasets:
path = '../output/edge_weights_{}'.format(dataset)
with open(path, 'r') as f:
lines = f.readlines()
x = [int(line.split()[0]) for line in lines]
y = [int(line.split()[1]) for line in lines]
m = np.sum(y)
xmin = None
if dataset == 'wikipedia':
xmin = 11
dist = fit_powerlaw(x, y, xmin)
    print('Dataset: {}'.format(dataset))
print('alpha: {}, xmin: {}'.format(dist.power_law.alpha, dist.power_law.xmin))
ba_c2 = {xx : yy / m for (xx,yy) in zip(x,y)}
ba_x,ba_y = log_binning(ba_c2,50)
fig, ax = plt.subplots()
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlim(min(x), max(x))
    plt.ylim(min(y)/m, max(y)/m)  # y is a plain list, so divide after min/max
plt.xscale('log')
plt.yscale('log')
ax.scatter(ba_x, ba_y, color='red', marker='s', s=50)
alpha, xmin = dist.power_law.alpha, dist.power_law.xmin
p_x = [t for t in ba_x if t >= xmin]
p_y = [(alpha-1)/xmin * (t/xmin)**(-alpha) for t in p_x]
ax.plot(p_x, p_y)
ax.get_lines()[0].set_color('blue')
plt.xlabel('edge weight', fontsize=20)
plt.ylabel('fraction', fontsize=20)
plt.title('Dataset: {}'.format(dataset), fontsize=20)
red_patch = mpatches.Patch(color='red', label='empirical fraction')
blue_patch = mpatches.Patch(color='blue', label='best fit powerlaw')
plt.legend(handles=[blue_patch, red_patch], fontsize=20)
fig.savefig('../figs/edge_weights_{}.pdf'.format(dataset), bbox_inches='tight')
plt.close() | 3,021 | 38.246753 | 155 | py |
Retrieving-Top-Weighted-Triangles-in-Graphs | Retrieving-Top-Weighted-Triangles-in-Graphs-master/src/print_parallel_edge_times.py | import argparse
import os
files = [
'tags-stack-overflow',
'threads-stack-overflow',
'wikipedia',
'eth',
'aminer',
'temporal-reddit-reply',
'MAG',
'spotify'
]
parser = argparse.ArgumentParser()
parser.add_argument('-k')
args = parser.parse_args()
for fname in files:
path = os.path.join('../output/compare_parallel_edge_{}'.format(args.k), fname)
with open(path, 'r') as f:
lines = f.readlines()
sort_time = 0
pre_times = []
times = []
total_times = []
accuracies = []
for line in lines:
if "Sort time" in line:
sort_time = float(line.split()[-1])
if "Pre-processing" in line:
pre_times.append(float(line.split()[-1]))
if "Total Time" in line:
times.append(float(line.split()[-1]))
if "Accuracy" in line:
accuracies.append(float(line.split()[-1]))
for (x, y) in zip(pre_times, times):
total_times.append(sort_time+x+y)
print("Dataset: {}".format(fname))
for (t, a) in zip(total_times, accuracies):
print("Time: {:.3f}, accuracy: {:.3f}".format(t, a))
print("---------------------------")
| 1,123 | 25.139535 | 81 | py |