repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
steer | steer-master/ffjord/datasets/gas.py | import pandas as pd
import numpy as np
import datasets
class GAS:
    """Ethylene/CO gas-sensor dataset wrapper with train/val/test splits."""

    class Data:
        """Holds a float32 feature matrix and its row count."""

        def __init__(self, data):
            # Cast once so every downstream consumer sees float32.
            self.x = data.astype(np.float32)
            self.N = self.x.shape[0]

    def __init__(self):
        path = datasets.root + 'gas/ethylene_CO.pickle'
        train_split, val_split, test_split = load_data_and_clean_and_split(path)

        self.trn = self.Data(train_split)
        self.val = self.Data(val_split)
        self.tst = self.Data(test_split)

        self.n_dims = self.trn.x.shape[1]
def load_data(file):
    """Load the raw gas pickle and discard the non-feature columns."""
    frame = pd.read_pickle(file)
    # data = pd.read_pickle(file).sample(frac=0.25)
    # data.to_pickle(file)
    # Methane, ethylene and timestamp columns are not modelled.
    for unused_column in ("Meth", "Eth", "Time"):
        frame.drop(unused_column, axis=1, inplace=True)
    return frame
def get_correlation_numbers(data):
    """Count, for each column, how many columns correlate with it above 0.98.

    Every column correlates perfectly with itself, so entries are >= 1;
    a value > 1 flags a column that nearly duplicates another.
    """
    C = data.corr()
    A = C > 0.98
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
    # supported replacement and returns the same ndarray.
    B = A.to_numpy().sum(axis=1)
    return B
def load_data_and_clean(file):
    """Load the gas data, drop near-duplicate columns, and standardise it."""
    data = load_data(file)
    counts = get_correlation_numbers(data)
    # Repeatedly remove one column that is >0.98-correlated with another
    # until every column correlates that strongly only with itself.
    while np.any(counts > 1):
        first_bad = np.where(counts > 1)[0][0]
        data.drop(data.columns[first_bad], axis=1, inplace=True)
        counts = get_correlation_numbers(data)
    # print(data.corr())
    # Zero mean, unit variance per column.
    return (data - data.mean()) / data.std()
def load_data_and_clean_and_split(file):
    """Clean the gas data and split it into train/validation/test arrays.

    The last 10% of rows becomes the test set, and the last 10% of the
    remaining rows becomes the validation set.
    """
    # DataFrame.as_matrix() was removed in pandas 1.0; use to_numpy().
    data = load_data_and_clean(file).to_numpy()
    N_test = int(0.1 * data.shape[0])
    # NOTE(review): assumes >= 10 rows so N_test > 0; with fewer rows the
    # negative slicing below would yield empty splits — confirm inputs.
    data_test = data[-N_test:]
    data_train = data[0:-N_test]
    N_validate = int(0.1 * data_train.shape[0])
    data_validate = data_train[-N_validate:]
    data_train = data_train[0:-N_validate]
    return data_train, data_validate, data_test
| 1,672 | 21.917808 | 59 | py |
steer | steer-master/ffjord/datasets/bsds300.py | import numpy as np
import h5py
import datasets
class BSDS300:
    """
    A dataset of patches from BSDS300.
    """

    class Data:
        """
        Constructs the dataset.
        """

        def __init__(self, data):
            # Full slice pulls the array into memory (works for both
            # ndarrays and h5py datasets).
            self.x = data[:]
            self.N = self.x.shape[0]

    def __init__(self):
        # Read the three pre-made splits from the HDF5 archive.
        f = h5py.File(datasets.root + 'BSDS300/BSDS300.hdf5', 'r')
        try:
            self.trn = self.Data(f['train'])
            self.val = self.Data(f['validation'])
            self.tst = self.Data(f['test'])
            self.n_dims = self.trn.x.shape[1]
            # Patches are square with one pixel dropped, hence the +1.
            self.image_size = [int(np.sqrt(self.n_dims + 1))] * 2
        finally:
            f.close()
| 663 | 17.971429 | 66 | py |
steer | steer-master/ffjord/datasets/miniboone.py | import numpy as np
import datasets
class MINIBOONE:
    """MiniBooNE particle-ID dataset wrapper with normalised splits."""

    class Data:
        """Wraps a float32 matrix plus its number of rows."""

        def __init__(self, data):
            self.x = data.astype(np.float32)
            self.N = self.x.shape[0]

    def __init__(self):
        path = datasets.root + 'miniboone/data.npy'
        train_arr, val_arr, test_arr = load_data_normalised(path)

        self.trn = self.Data(train_arr)
        self.val = self.Data(val_arr)
        self.tst = self.Data(test_arr)

        self.n_dims = self.trn.x.shape[1]
def load_data(root_path):
    """Load the pre-processed MiniBooNE array and split train/val/test.

    The final 10% of rows forms the test set and the final 10% of the
    remaining rows forms the validation set.
    """
    # data.npy was produced offline from the raw whitespace-delimited file:
    # outliers (first feature < -100) were removed, as were features whose
    # most frequent value re-occurred more than 5 times; see the original
    # commented-out pipeline in the repository history for details.
    data = np.load(root_path)

    n_test = int(0.1 * data.shape[0])
    data_test = data[-n_test:]
    remainder = data[0:-n_test]

    n_validate = int(0.1 * remainder.shape[0])
    data_validate = remainder[-n_validate:]
    data_train = remainder[0:-n_validate]

    return data_train, data_validate, data_test
def load_data_normalised(root_path):
    """Split the data, then z-score all splits with train+val statistics."""
    data_train, data_validate, data_test = load_data(root_path)

    # Normalisation statistics come from train and validation combined.
    reference = np.vstack((data_train, data_validate))
    mu = reference.mean(axis=0)
    s = reference.std(axis=0)

    def normalise(split):
        return (split - mu) / s

    return normalise(data_train), normalise(data_validate), normalise(data_test)
| 1,955 | 26.942857 | 96 | py |
steer | steer-master/ffjord/datasets/__init__.py | root = 'data/'
from .power import POWER
from .gas import GAS
from .hepmass import HEPMASS
from .miniboone import MINIBOONE
from .bsds300 import BSDS300
| 153 | 18.25 | 32 | py |
steer | steer-master/ffjord/diagnostics/plot_sn_losses.py | import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Paths to the training logs scraped below.
CIFAR10 = "diagnostics/cifar10_multiscale.log"
CIFAR10_SN = "diagnostics/cifar10_multiscale_sn.log"
MNIST = "diagnostics/mnist_multiscale.log"
MNIST_SN = "diagnostics/mnist_multiscale_sn.log"
def get_values(filename):
    """Parse a training log, returning (bits/dim values, NFE values).

    Each metric appears as "<name> <current> (<running avg>)"; the running
    average inside the parentheses is what gets collected.
    """
    losses, nfes = [], []

    def running_average(line, pattern):
        # Grab "<name> ... (x.y)" then peel off the parenthesised number.
        hit = re.findall(pattern, line)
        if hit:
            hit = re.findall(r"\([0-9\.]*\)", hit[0])
        if hit:
            hit = re.findall(r"[0-9\.]+", hit[0])
        return float(hit[0]) if hit else None

    with open(filename, "r") as f:
        for line in f.readlines():
            step = running_average(line, r"Steps [^|(]*\([0-9\.]*\)")
            if step is not None:
                nfes.append(step)
            loss = running_average(line, r"Bit/dim [^|(]*\([0-9\.]*\)")
            if loss is not None:
                losses.append(loss)

    return losses, nfes
# Parse (bits/dim, NFE) series from each experiment's training log.
cifar10_loss, cifar10_nfes = get_values(CIFAR10)
cifar10_sn_loss, cifar10_sn_nfes = get_values(CIFAR10_SN)
mnist_loss, mnist_nfes = get_values(MNIST)
mnist_sn_loss, mnist_sn_nfes = get_values(MNIST_SN)

import brewer2mpl

# Qualitative colour palettes for the scatter plots.
line_colors = brewer2mpl.get_map('Set2', 'qualitative', 4).mpl_colors
dark_colors = brewer2mpl.get_map('Dark2', 'qualitative', 4).mpl_colors
plt.style.use('ggplot')

# CIFAR10 plot
plt.figure(figsize=(6, 7))
plt.scatter(cifar10_nfes, cifar10_loss, color=line_colors[1], label="w/o Spectral Norm")
plt.scatter(cifar10_sn_nfes, cifar10_sn_loss, color=line_colors[2], label="w/ Spectral Norm")
plt.ylim([3, 5])
plt.legend(fontsize=18)
plt.xlabel("NFE", fontsize=30)
plt.ylabel("Bits/dim", fontsize=30)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=24)
ax.tick_params(axis='both', which='minor', labelsize=16)
ax.yaxis.set_ticks([3, 3.5, 4, 4.5, 5])
plt.tight_layout()
plt.savefig('cifar10_sn_loss_vs_nfe.pdf')

# MNIST plot
plt.figure(figsize=(6, 7))
plt.scatter(mnist_nfes, mnist_loss, color=line_colors[1], label="w/o Spectral Norm")
plt.scatter(mnist_sn_nfes, mnist_sn_loss, color=line_colors[2], label="w/ Spectral Norm")
plt.ylim([0.9, 2])
plt.legend(fontsize=18)
plt.xlabel("NFE", fontsize=30)
plt.ylabel("Bits/dim", fontsize=30)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=24)
ax.tick_params(axis='both', which='minor', labelsize=16)
# ax.yaxis.set_ticks([3, 3.5, 4, 4.5, 5])
plt.tight_layout()
plt.savefig('mnist_sn_loss_vs_nfe.pdf')
| 2,407 | 28.365854 | 93 | py |
steer | steer-master/ffjord/diagnostics/scrap_log.py | import os
import re
import csv
def log_to_csv(log_filename, csv_filename):
    """Convert the 'Iter ...' lines of a training log into a CSV file.

    The CSV header is taken from the quantities found on the first
    matching line; each subsequent matching line becomes one row.
    """
    with open(log_filename, 'r') as f:
        lines = f.readlines()

    with open(csv_filename, 'w', newline='') as csvfile:
        writer = None
        for line in lines:
            if not line.startswith('Iter'):
                continue
            # A dictionary of quantity : value.
            quants = _line_to_dict(line)
            if writer is None:
                # The first data line defines the column set.
                writer = csv.DictWriter(csvfile, fieldnames=quants.keys())
                writer.writeheader()
            writer.writerow(quants)
def _line_to_dict(line):
line = re.sub(':', '', line) # strip colons.
line = re.sub('\([^)]*\)', '', line) # strip running averages.
quants = {}
for quant_str in line.split('|'):
quant_str = quant_str.strip() # strip beginning and ending whitespaces.
key, val = quant_str.split(' ')
quants[key] = val
return quants
def plot_pairplot(csv_filename, fig_filename, top=None):
    """Render a pairwise regression/KDE plot of the logged quantities.

    If `top` is given, only the first `top` rows are plotted.
    """
    import seaborn as sns
    import pandas as pd

    sns.set(style="ticks", color_codes=True)

    quants = pd.read_csv(csv_filename)
    if top is not None:
        quants = quants[:top]

    grid = sns.pairplot(quants, kind='reg', diag_kind='kde', markers='.')
    grid.savefig(fig_filename)
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--log', type=str, required=True)
    parser.add_argument('--top_iters', type=int, default=None)
    args = parser.parse_args()

    csv_path = args.log + '.csv'
    print('Parsing log into csv.')
    log_to_csv(args.log, csv_path)

    print('Creating correlation plot.')
    fig_path = os.path.join(os.path.dirname(args.log), 'quants.png')
    plot_pairplot(csv_path, fig_path, args.top_iters)
| 1,925 | 28.630769 | 107 | py |
steer | steer-master/ffjord/diagnostics/plot_nfe_vs_dim_vae.py | import os.path
import re
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import scipy.ndimage
import seaborn as sns
# Seaborn styling and a qualitative palette preview.
sns.set_style("whitegrid")
colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple"]
sns.palplot(sns.xkcd_palette(colors))

# Latent dimensionality of each run and its matching snapshot directory.
dims = [16, 32, 48, 64]
dirs = [
    'vae_mnist_cnf_num_flows_4_256-256_num_blocks_1__2018-09-16_17_27_03',
    'vae_mnist_cnf_num_flows_4_256-256_num_blocks_1__2018-09-16_17_26_41',
    'vae_mnist_cnf_num_flows_4_256-256_num_blocks_1__2018-09-16_17_23_35',
    'vae_mnist_cnf_num_flows_4_256-256_num_blocks_1__2018-09-16_17_25_03',
]

# Scrape the forward NFE counts out of each run's log file.
nfe_all = []
for dim, dirname in zip(dims, dirs):
    with open(os.path.join('snapshots', dirname, 'logs'), 'r') as f:
        lines = f.readlines()
    nfes_ = []
    for line in lines:
        w = re.findall(r"NFE Forward [0-9]*", line)
        if w: w = re.findall(r"[0-9]+", w[0])
        if w:
            nfes_.append(float(w[0]))
    nfe_all.append(nfes_)

# Plot one smoothed NFE curve per latent dimensionality.
plt.figure(figsize=(4, 2.4))
for i, (dim, nfes) in enumerate(zip(dims, nfe_all)):
    nfes = np.array(nfes)
    xx = (np.arange(len(nfes)) + 1) / 50
    # Heavy Gaussian smoothing so the trend, not the noise, is visible.
    nfes = scipy.ndimage.gaussian_filter(nfes, 101)
    plt.plot(xx, nfes, '--', label='Dim {}'.format(dim))
plt.legend(frameon=True, fontsize=10.5)
plt.xlabel('Epoch', fontsize=18)
plt.ylabel('NFE', fontsize=18)
plt.xlim([0, 200])
plt.tight_layout()
plt.savefig("nfes_vs_dim_vae.pdf")
| 1,443 | 27.313725 | 76 | py |
steer | steer-master/ffjord/diagnostics/viz_toy.py | import os
import math
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
def standard_normal_logprob(z):
    """Elementwise log-density of z under a standard normal distribution."""
    log_normalizer = -0.5 * math.log(2 * math.pi)
    return log_normalizer - z.pow(2) / 2
def makedirs(dirname):
    """Create `dirname` (including parents) if it does not already exist.

    Uses exist_ok=True instead of an exists()-then-create pair, avoiding
    the race where another process creates the directory between the
    check and the makedirs call.
    """
    os.makedirs(dirname, exist_ok=True)
def save_trajectory(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
    """Render per-timestep frames of a 2-D CNF (target / density / samples /
    vector field panels) into `savedir` as viz-XXXXX.jpg images.

    ntimes is the number of integration timesteps rendered; memory is the
    fraction of the density grid pushed through the flow per chunk
    (smaller values use less device memory).
    """
    model.eval()
    # Sample from prior
    z_samples = torch.randn(2000, 2).to(device)
    # sample from a grid
    npts = 800
    side = np.linspace(-4, 4, npts)
    xx, yy = np.meshgrid(side, side)
    xx = torch.from_numpy(xx).type(torch.float32).to(device)
    yy = torch.from_numpy(yy).type(torch.float32).to(device)
    z_grid = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1)
    with torch.no_grad():
        # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), 1, keepdim=True)
        logp_grid = torch.sum(standard_normal_logprob(z_grid), 1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            integration_times = torch.linspace(0, end_time, ntimes)
            # reverse=True integrates prior samples towards data space.
            z_traj, _ = cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            z_traj = z_traj.cpu().numpy()
            grid_z_traj, grid_logpz_traj = [], []
            inds = torch.arange(0, z_grid.shape[0]).to(torch.int64)
            # Push the grid through the flow in chunks to bound memory use.
            for ii in torch.split(inds, int(z_grid.shape[0] * memory)):
                _grid_z_traj, _grid_logpz_traj = cnf(
                    z_grid[ii], logp_grid[ii], integration_times=integration_times, reverse=True
                )
                _grid_z_traj, _grid_logpz_traj = _grid_z_traj.cpu().numpy(), _grid_logpz_traj.cpu().numpy()
                grid_z_traj.append(_grid_z_traj)
                grid_logpz_traj.append(_grid_logpz_traj)
            grid_z_traj = np.concatenate(grid_z_traj, axis=1)
            grid_logpz_traj = np.concatenate(grid_logpz_traj, axis=1)
            plt.figure(figsize=(8, 8))
            # One saved frame per integration timestep.
            for _ in range(z_traj.shape[0]):
                plt.clf()
                # plot target potential function
                ax = plt.subplot(2, 2, 1, aspect="equal")
                ax.hist2d(data_samples[:, 0], data_samples[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
                ax.invert_yaxis()
                ax.get_xaxis().set_ticks([])
                ax.get_yaxis().set_ticks([])
                ax.set_title("Target", fontsize=32)
                # plot the density
                ax = plt.subplot(2, 2, 2, aspect="equal")
                z, logqz = grid_z_traj[t], grid_logpz_traj[t]
                xx = z[:, 0].reshape(npts, npts)
                yy = z[:, 1].reshape(npts, npts)
                qz = np.exp(logqz).reshape(npts, npts)
                plt.pcolormesh(xx, yy, qz)
                ax.set_xlim(-4, 4)
                ax.set_ylim(-4, 4)
                cmap = matplotlib.cm.get_cmap(None)
                #ax.set_axis_bgcolor(cmap(0.))
                ax.invert_yaxis()
                ax.get_xaxis().set_ticks([])
                ax.get_yaxis().set_ticks([])
                ax.set_title("Density", fontsize=32)
                # plot the samples
                ax = plt.subplot(2, 2, 3, aspect="equal")
                zk = z_traj[t]
                ax.hist2d(zk[:, 0], zk[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
                ax.invert_yaxis()
                ax.get_xaxis().set_ticks([])
                ax.get_yaxis().set_ticks([])
                ax.set_title("Samples", fontsize=32)
                # plot vector field
                ax = plt.subplot(2, 2, 4, aspect="equal")
                K = 13j
                # np.mgrid with a complex step yields 13 evenly spaced points.
                y, x = np.mgrid[-4:4:K, -4:4:K]
                K = int(K.imag)
                zs = torch.from_numpy(np.stack([x, y], -1).reshape(K * K, 2)).to(device, torch.float32)
                logps = torch.zeros(zs.shape[0], 1).to(device, torch.float32)
                dydt = cnf.odefunc(integration_times[t], (zs, logps))[0]
                dydt = -dydt.cpu().detach().numpy()
                dydt = dydt.reshape(K, K, 2)
                # Colour arrows by squared log-magnitude of the dynamics.
                logmag = 2 * np.log(np.hypot(dydt[:, :, 0], dydt[:, :, 1]))
                ax.quiver(
                    x, y, dydt[:, :, 0], dydt[:, :, 1],
                    np.exp(logmag), cmap="coolwarm", scale=20., width=0.015, pivot="mid"
                )
                ax.set_xlim(-4, 4)
                ax.set_ylim(-4, 4)
                ax.axis("off")
                ax.set_title("Vector Field", fontsize=32)
                makedirs(savedir)
                plt.savefig(os.path.join(savedir, f"viz-{t:05d}.jpg"))
                t += 1
def trajectory_to_video(savedir):
    """Stitch the saved viz-XXXXX.jpg frames in `savedir` into traj.mp4."""
    import subprocess
    frame_pattern = os.path.join(savedir, 'viz-%05d.jpg')
    movie_path = os.path.join(savedir, 'traj.mp4')
    command = 'ffmpeg -y -i {} {}'.format(frame_pattern, movie_path)
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    output, error = proc.communicate()
if __name__ == '__main__':
    import argparse
    import sys

    # Make the project root importable when running from diagnostics/.
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')))

    import lib.toy_data as toy_data
    from train_misc import count_parameters
    from train_misc import set_cnf_options, add_spectral_norm, create_regularization_fns
    from train_misc import build_model_tabular

    def get_ckpt_model_and_data(args):
        """Restore the trained model from args.checkpt and draw 2000 samples
        from the toy dataset the checkpoint was trained on."""
        # Load checkpoint.
        checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
        ckpt_args = checkpt['args']
        state_dict = checkpt['state_dict']

        # Construct model and restore checkpoint.
        regularization_fns, regularization_coeffs = create_regularization_fns(ckpt_args)
        model = build_model_tabular(ckpt_args, 2, regularization_fns).to(device)
        if ckpt_args.spectral_norm: add_spectral_norm(model)
        set_cnf_options(ckpt_args, model)

        model.load_state_dict(state_dict)
        model.to(device)

        print(model)
        print("Number of trainable parameters: {}".format(count_parameters(model)))

        # Load samples from dataset
        data_samples = toy_data.inf_train_gen(ckpt_args.data, batch_size=2000)

        return model, data_samples

    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpt', type=str, required=True)
    parser.add_argument('--ntimes', type=int, default=101)
    parser.add_argument('--memory', type=float, default=0.01, help='Higher this number, the more memory is consumed.')
    parser.add_argument('--save', type=str, default='trajectory')
    args = parser.parse_args()

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model, data_samples = get_ckpt_model_and_data(args)
    # Render per-timestep frames, then stitch them into traj.mp4.
    save_trajectory(model, data_samples, args.save, ntimes=args.ntimes, memory=args.memory, device=device)
    trajectory_to_video(args.save)
| 7,028 | 38.05 | 119 | py |
steer | steer-master/ffjord/diagnostics/plot_losses.py | import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Paths to the training logs scraped below.
CIFAR10 = "diagnostics/cifar10_multiscale.log"
CIFAR10_SN = "diagnostics/cifar10_multiscale_sn.log"
MNIST = "diagnostics/mnist_multiscale.log"
MNIST_SN = "diagnostics/mnist_multiscale_sn.log"
def get_values(filename):
    """Scrape (bits/dim, NFE) running averages from one training log.

    Metrics appear in the form "<name> <current> (<running avg>)"; only
    the parenthesised running average is kept.
    """
    with open(filename, "r") as handle:
        log_lines = handle.readlines()

    losses = []
    nfes = []

    def extract(pattern, text):
        # Match the metric, then isolate "(x.y)", then the number itself.
        found = re.findall(pattern, text)
        if found:
            found = re.findall(r"\([0-9\.]*\)", found[0])
        if found:
            found = re.findall(r"[0-9\.]+", found[0])
        return float(found[0]) if found else None

    for log_line in log_lines:
        nfe = extract(r"Steps [^|(]*\([0-9\.]*\)", log_line)
        if nfe is not None:
            nfes.append(nfe)
        loss = extract(r"Bit/dim [^|(]*\([0-9\.]*\)", log_line)
        if loss is not None:
            losses.append(loss)

    return losses, nfes
# Parse (bits/dim, NFE) series from each experiment's training log.
cifar10_loss, cifar10_nfes = get_values(CIFAR10)
cifar10_sn_loss, cifar10_sn_nfes = get_values(CIFAR10_SN)
mnist_loss, mnist_nfes = get_values(MNIST)
mnist_sn_loss, mnist_sn_nfes = get_values(MNIST_SN)

import brewer2mpl

# Qualitative colour palettes for the scatter plots.
line_colors = brewer2mpl.get_map('Set2', 'qualitative', 4).mpl_colors
dark_colors = brewer2mpl.get_map('Dark2', 'qualitative', 4).mpl_colors
plt.style.use('ggplot')

# CIFAR10 plot
plt.figure(figsize=(6, 7))
plt.scatter(cifar10_nfes, cifar10_loss, color=line_colors[1], label="w/o Spectral Norm")
plt.scatter(cifar10_sn_nfes, cifar10_sn_loss, color=line_colors[2], label="w/ Spectral Norm")
plt.ylim([3, 5])
plt.legend(fontsize=18)
plt.xlabel("NFE", fontsize=30)
plt.ylabel("Bits/dim", fontsize=30)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=24)
ax.tick_params(axis='both', which='minor', labelsize=16)
ax.yaxis.set_ticks([3, 3.5, 4, 4.5, 5])
plt.tight_layout()
plt.savefig('cifar10_sn_loss_vs_nfe.pdf')

# MNIST plot
plt.figure(figsize=(6, 7))
plt.scatter(mnist_nfes, mnist_loss, color=line_colors[1], label="w/o Spectral Norm")
plt.scatter(mnist_sn_nfes, mnist_sn_loss, color=line_colors[2], label="w/ Spectral Norm")
plt.ylim([0.9, 2])
plt.legend(fontsize=18)
plt.xlabel("NFE", fontsize=30)
plt.ylabel("Bits/dim", fontsize=30)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=24)
ax.tick_params(axis='both', which='minor', labelsize=16)
# ax.yaxis.set_ticks([3, 3.5, 4, 4.5, 5])
plt.tight_layout()
plt.savefig('mnist_sn_loss_vs_nfe.pdf')
| 2,407 | 28.365854 | 93 | py |
steer | steer-master/ffjord/diagnostics/viz_cnf.py | from inspect import getsourcefile
import sys
import os
import subprocess
# Put the project root (parent of this diagnostics/ directory) on sys.path
# so the `lib` imports below resolve when this file is run as a script.
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import argparse
import torch
import torchvision.datasets as dset
import torchvision.transforms as tforms
from torchvision.utils import save_image
import lib.layers as layers
import lib.spectral_norm as spectral_norm
import lib.utils as utils
def add_noise(x):
    """
    Dequantize pixels: [0, 1] -> [0, 255] -> add uniform noise -> [0, 1].

    Each intensity is spread over its own 1/256-wide bin so the data has
    a continuous density. The deprecated `x.new().resize_as_(x).uniform_()`
    pattern (uninitialized tensor filled in place) is replaced by the
    supported `torch.rand_like`, which draws the same U[0, 1) noise with
    matching shape, dtype, and device.
    """
    noise = torch.rand_like(x)
    return (x * 255 + noise) / 256
def get_dataset(args):
    """Build train/test DataLoaders and the data shape for args.data.

    Supported datasets: mnist, svhn, cifar10, celeba. Images are resized,
    converted to tensors, and dequantized with uniform noise.

    Fixes: the celeba branch tested `args.dataset` while every other
    branch tests `args.data` (so celeba was unreachable via the same
    flag), and its test transform resized with `args.imagesize`, which is
    None by default — it now uses the resolved `im_size` like the rest.
    """
    trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])

    if args.data == "mnist":
        im_dim = 1
        im_size = 28 if args.imagesize is None else args.imagesize
        train_set = dset.MNIST(root="./data", train=True, transform=trans(im_size), download=True)
        test_set = dset.MNIST(root="./data", train=False, transform=trans(im_size), download=True)
    elif args.data == "svhn":
        im_dim = 3
        im_size = 32 if args.imagesize is None else args.imagesize
        train_set = dset.SVHN(root="./data", split="train", transform=trans(im_size), download=True)
        test_set = dset.SVHN(root="./data", split="test", transform=trans(im_size), download=True)
    elif args.data == "cifar10":
        im_dim = 3
        im_size = 32 if args.imagesize is None else args.imagesize
        train_set = dset.CIFAR10(root="./data", train=True, transform=trans(im_size), download=True)
        test_set = dset.CIFAR10(root="./data", train=False, transform=trans(im_size), download=True)
    elif args.data == 'celeba':  # was args.dataset — inconsistent with the branches above
        im_dim = 3
        im_size = 64 if args.imagesize is None else args.imagesize
        train_set = dset.CelebA(
            train=True, transform=tforms.Compose([
                tforms.ToPILImage(),
                tforms.Resize(im_size),
                tforms.RandomHorizontalFlip(),
                tforms.ToTensor(),
                add_noise,
            ])
        )
        test_set = dset.CelebA(
            train=False, transform=tforms.Compose([
                tforms.ToPILImage(),
                tforms.Resize(im_size),  # was args.imagesize, which may be None
                tforms.ToTensor(),
                add_noise,
            ])
        )
    data_shape = (im_dim, im_size, im_size)

    train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=args.batch_size, shuffle=False)
    return train_loader, test_loader, data_shape
def add_spectral_norm(model):
    """Wrap the weight of every Conv* submodule in spectral norm, in place."""

    def _walk(module):
        # Snapshot the keys since re-registering mutates _modules.
        for child_name in list(module._modules.keys()):
            child = module._modules[child_name]
            is_conv = child.__class__.__name__.find('Conv') != -1
            if is_conv and 'weight' in child._parameters:
                # Replace the child with its spectral-normalized wrapper.
                del module._modules[child_name]
                module.add_module(child_name, spectral_norm.spectral_norm(child, 'weight'))
            else:
                _walk(child)

    _walk(model)
def build_model(args, state_dict):
    """Reconstruct the CNF image model described by the checkpoint args,
    load its weights, and return (model, test_dataset)."""
    # load dataset
    train_loader, test_loader, data_shape = get_dataset(args)

    hidden_dims = tuple(map(int, args.dims.split(",")))
    strides = tuple(map(int, args.strides.split(",")))

    # neural net that parameterizes the velocity field
    if args.autoencode:

        def build_cnf():
            # Autoencoder variant: divergence computed via encoder/decoder nets.
            autoencoder_diffeq = layers.AutoencoderDiffEqNet(
                hidden_dims=hidden_dims,
                input_shape=data_shape,
                strides=strides,
                conv=args.conv,
                layer_type=args.layer_type,
                nonlinearity=args.nonlinearity,
            )
            odefunc = layers.AutoencoderODEfunc(
                autoencoder_diffeq=autoencoder_diffeq,
                divergence_fn=args.divergence_fn,
                residual=args.residual,
                rademacher=args.rademacher,
            )
            cnf = layers.CNF(
                odefunc=odefunc,
                T=args.time_length,
                solver=args.solver,
            )
            return cnf
    else:

        def build_cnf():
            # Standard variant: a single ODE net parameterizes the dynamics.
            diffeq = layers.ODEnet(
                hidden_dims=hidden_dims,
                input_shape=data_shape,
                strides=strides,
                conv=args.conv,
                layer_type=args.layer_type,
                nonlinearity=args.nonlinearity,
            )
            odefunc = layers.ODEfunc(
                diffeq=diffeq,
                divergence_fn=args.divergence_fn,
                residual=args.residual,
                rademacher=args.rademacher,
            )
            cnf = layers.CNF(
                odefunc=odefunc,
                T=args.time_length,
                solver=args.solver,
            )
            return cnf

    # Logit transform maps [0, 1] pixels to an unbounded space before the CNF.
    chain = [layers.LogitTransform(alpha=args.alpha), build_cnf()]
    if args.batch_norm:
        chain.append(layers.MovingBatchNorm2d(data_shape[0]))
    model = layers.SequentialFlow(chain)

    if args.spectral_norm:
        # Must be applied before loading weights so state_dict keys line up.
        add_spectral_norm(model)

    model.load_state_dict(state_dict)

    return model, test_loader.dataset
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Visualizes experiments trained using train_cnf.py.")
    parser.add_argument("--checkpt", type=str, required=True)
    parser.add_argument("--nsamples", type=int, default=50)
    parser.add_argument("--ntimes", type=int, default=100)
    parser.add_argument("--save", type=str, default="imgs")
    args = parser.parse_args()

    # Restore the trained model from the checkpoint.
    checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
    ck_args = checkpt["args"]
    state_dict = checkpt["state_dict"]

    model, test_set = build_model(ck_args, state_dict)

    # Real test images plus latent draws that will become fake samples.
    real_samples = torch.stack([test_set[i][0] for i in range(args.nsamples)], dim=0)
    data_shape = real_samples.shape[1:]
    fake_latents = torch.randn(args.nsamples, *data_shape)

    # Transfer to GPU if available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Running on {}".format(device))
    model.to(device)
    real_samples = real_samples.to(device)
    fake_latents = fake_latents.to(device)

    # Construct fake samples
    fake_samples = model(fake_latents, reverse=True).view(-1, *data_shape)
    samples = torch.cat([real_samples, fake_samples], dim=0)
    # All-zero "dynamics" used for the still frames.
    still_diffeq = torch.zeros_like(samples)

    im_indx = 0

    # Image-saving helper function
    def save_im(im, diffeq):
        """Save one frame: image rows interleaved with their dynamics rows."""
        global im_indx
        filename = os.path.join(current_dir, args.save, "flow_%05d.png" % im_indx)
        utils.makedirs(os.path.dirname(filename))

        diffeq = diffeq.clone()
        # Normalize the dynamics to [0, 1] for display.
        de_min, de_max = float(diffeq.min()), float(diffeq.max())
        diffeq.clamp_(min=de_min, max=de_max)
        diffeq.add_(-de_min).div_(de_max - de_min + 1e-5)

        assert im.shape == diffeq.shape
        shape = im.shape
        interleaved = torch.stack([im, diffeq]).transpose(0, 1).contiguous().view(2 * shape[0], *shape[1:])

        save_image(interleaved, filename, nrow=20, padding=0, range=(0, 1))
        im_indx += 1

    # Still frames with image samples.
    for _ in range(30):
        save_im(samples, still_diffeq)

    # Forward image to latent.
    logits = model.chain[0](samples)
    for i in range(1, len(model.chain)):
        assert isinstance(model.chain[i], layers.CNF)
        cnf = model.chain[i]

        tt = torch.linspace(cnf.integration_times[0], cnf.integration_times[-1], args.ntimes)
        z_t = cnf(logits, integration_times=tt)
        logits = z_t[-1]

        # transform back to image space
        im_t = model.chain[0](z_t.view(args.ntimes * args.nsamples * 2, *data_shape),
                              reverse=True).view(args.ntimes, 2 * args.nsamples, *data_shape)

        # save each step as an image
        for t, im in zip(tt, im_t):
            diffeq = cnf.odefunc(t, (im, None))[0]
            diffeq = model.chain[0](diffeq, reverse=True)
            save_im(im, diffeq)

    # Still frames with latent samples.
    latents = model.chain[0](logits, reverse=True)
    for _ in range(30):
        save_im(latents, still_diffeq)

    # Reverse pass: latent back to image (integration times reversed).
    for i in range(len(model.chain) - 1, 0, -1):
        assert isinstance(model.chain[i], layers.CNF)
        cnf = model.chain[i]

        tt = torch.linspace(cnf.integration_times[-1], cnf.integration_times[0], args.ntimes)
        z_t = cnf(logits, integration_times=tt)
        logits = z_t[-1]

        # transform back to image space
        im_t = model.chain[0](z_t.view(args.ntimes * args.nsamples * 2, *data_shape),
                              reverse=True).view(args.ntimes, 2 * args.nsamples, *data_shape)

        # save each step as an image
        for t, im in zip(tt, im_t):
            diffeq = cnf.odefunc(t, (im, None))[0]
            diffeq = model.chain[0](diffeq, reverse=True)
            save_im(im, -diffeq)

    # Combine the images into a movie
    bashCommand = r"ffmpeg -y -i {}/flow_%05d.png {}".format(
        os.path.join(current_dir, args.save), os.path.join(current_dir, args.save, "flow.mp4")
    )
    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
| 9,576 | 36.120155 | 107 | py |
steer | steer-master/ffjord/diagnostics/viz_fig1.py | import os
import math
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
from scipy import interpolate as interp
import lib.utils as utils
def standard_normal_logprob(z):
    """Return the elementwise standard-normal log-density of z."""
    normalizing_constant = -0.5 * math.log(2 * math.pi)
    return normalizing_constant - z.pow(2) / 2
def makedirs(dirname):
    """Create `dirname` (including parents) if it does not already exist.

    exist_ok=True replaces the exists()-then-create pair, closing the race
    where another process creates the directory between check and create.
    """
    os.makedirs(dirname, exist_ok=True)
def save_fig1(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
    """Integrate prior samples backwards through the flow and save each
    sample's trajectory as a heat-map image (fig1_<i>.jpg in savedir).

    Fix: removed the stray `plt.xaxis` expression statement — pyplot has
    no `xaxis` attribute, so that line raised AttributeError on the first
    iteration of the save loop.
    """
    model.eval()
    # Sample from prior
    # z_samples = torch.randn(20, 200).to(device)
    z_samples = torch.randn(20, 50).to(device)
    with torch.no_grad():
        # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), 1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            integration_times = torch.linspace(0, end_time, ntimes)
            # reverse=True integrates prior -> data.
            z_traj, _ = cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            z_traj = z_traj.cpu().numpy()
    makedirs(savedir)
    for sample in range(z_traj.shape[1]):
        plt.clf()
        # Rows are timesteps, columns are the dimensions of this sample's path.
        plt.imshow(z_traj[:, sample, :], cmap='plasma')
        plt.savefig(os.path.join(savedir, "fig1_" + str(sample) + ".jpg"))
def save_fig1_rev(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
    """Integrate the first data sample forward through the flow and save
    each intermediate state as a heat-map image (fig1_forward*.jpg)."""
    model.eval()
    data_samples=torch.tensor(data_samples).float().cuda()
    # Sample from prior
    # z_samples = torch.randn(20, 200).to(device)
    z_samples = torch.randn(20, 50).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), 1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            integration_times = torch.linspace(0, end_time, ntimes)
            # reverse=False integrates data -> latent; only sample 0 is used.
            z_traj, _ = cnf(data_samples[0:1], logp_samples[0:1], integration_times=integration_times, reverse=False)
            z_traj = z_traj.cpu().numpy()
    print('zt',z_traj.shape)
    makedirs(savedir)
    plt.clf()
    # NOTE(review): data_samples lives on the GPU here; imshow on a CUDA
    # tensor likely needs .cpu() first — confirm against a run.
    plt.imshow(data_samples[0:1],cmap='plasma')
    plt.savefig(os.path.join(savedir, "fig1_data.jpg"))
    for sample in range(z_traj.shape[1]):
        plt.clf()
        # Rows are timesteps, columns are dimensions of the trajectory.
        plt.imshow(z_traj[:,sample,:],cmap='plasma')
        plt.savefig(os.path.join(savedir, "fig1_forward"+str(sample)+".jpg"))
def save_fig1_1d_ptd(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
    """Render the 1-D density-evolution figure: p(z) at the end time (top),
    a density heat-map over time with streamlines of the learned dynamics
    (middle), and p(z) at time zero (bottom)."""
    model.eval()
    # data_samples=torch.tensor(data_samples).float().cuda()
    # Sample from prior
    z_samples = torch.randn(30, 1).to(device)
    # linspace for plotting
    npts=500
    z_samples = np.linspace(-4,4,100)
    z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
    znp = np.linspace(-4,4,npts)
    z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
        logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            integration_times = torch.linspace(0, end_time, ntimes)

            # Log-density of the grid points at intermediate time t: integrate
            # from t to the end time and correct by the accumulated change in
            # log-density along the way.
            def log_prob(t):
                z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
                z_traj = z_traj
                logp_z_traj = standard_normal_logprob(z_traj)
                dlogp_traj = dlogp_traj.cpu().numpy()
                return logp_z_traj.cpu().numpy() - dlogp_traj

            logp = []
            for t in integration_times:
                logp.append(log_prob(t))

            # The differential equation evaluated at some t and x.
            def _differential(t, x):
                t = torch.tensor(t).to(device)
                x = torch.tensor(x).to(device)
                return cnf.odefunc.odefunc.diffeq(t, x)

            # Sample the (negated) vector field on a t-x grid for streamlines.
            ts = np.linspace(0,end_time,100)
            xs = np.linspace(-4,4,100)
            dxs = torch.zeros(ts.shape[0],xs.shape[0])
            for ti , t in enumerate(ts):
                for xi,x in enumerate(xs):
                    dxs[ti,xi]= -_differential(t,[x])
            dxs = torch.tensor(dxs)
            dts = torch.ones_like(dxs)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
            z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
            # z_traj = z_traj.cpu().numpy()
            # logp_traj= logp_traj.cpu().numpy()
            makedirs(savedir)
            plt.clf()
            # Per-time densities, normalised row-wise for display.
            probs = np.exp(np.array(logp)[:,:,0])
            maxs = np.amax(probs,axis=1,keepdims=True)
            probs = probs / maxs
            plt.rcParams.update({'font.size': 13})
            # Three stacked panels: p at t1, density-over-time, p at t0.
            fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
                                     gridspec_kw={'height_ratios': [1,5, 1]},
                                     figsize=(4, 7))
            fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
            axes[0].scatter(znp,np.exp(np.array(logp)[0,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[0,:,0]),cmap='viridis')
            axes[0].set_xlim(-4,4)
            axes[0].set_ylabel(r"$p(z(t_1))$",labelpad=20)
            axes[0].set_yticks([])
            axes[0].get_xaxis().set_visible(False)
            axes[0].spines['top'].set_visible(False)
            axes[0].spines['right'].set_visible(False)
            axes[0].set_ylim(bottom=0.0)
            axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
            axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.7,0.5),arrowsize=0.8)
            axes[1].set_xlim(-4,4)
            axes[1].set_yticks([0,0.5])
            axes[1].set_yticklabels([r"$0$",r"$1$"])
            axes[1].set_ylabel(r"$t$")
            axes[1].get_xaxis().set_visible(False)
            # axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
            axes[2].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[-1,:,0]),cmap='viridis')
            axes[2].set_xlim(-4,4)
            axes[2].set_ylabel(r"$p(z(t_0))$",labelpad=20)
            axes[2].set_xlabel(r"$z$")
            axes[2].set_yticks([])
            axes[2].set_xticks([])
            # axes[2].get_xaxis().set_visible(False)
            axes[2].spines['top'].set_visible(False)
            axes[2].spines['right'].set_visible(False)
            axes[2].set_ylim(bottom=0.)
            # fig.subplots_adjust(hspace=0.)
            # Manually nudge the outer panels flush against the heat-map.
            pos0 = axes[0].get_position(original=False)
            pos1 = axes[1].get_position(original=False)
            pos2 = axes[2].get_position(original=False)
            print(pos0.y0)
            print(pos1.y0+pos1.height)
            axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
            axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
            plt.savefig(os.path.join(savedir, "fig1_1d_together"+str(itr)+".png"),pad_inches=0,bbox_inches='tight',dpi=350)
def save_fig1_1d_ptd_timescrub(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
    """Write one PNG per integration time, "scrubbing" a red time-marker
    across the 1D density evolution of a CNF.

    For each CNF layer in ``model.chain`` the density p(z(t)) is evaluated
    on a fixed grid of z values at ``ntimes`` time points; one two-panel
    figure per time step is then saved to ``savedir`` as
    ``fig1_1d_scrub<timerow>.png`` (top: density at that time, bottom:
    full density-over-time heatmap with the learned vector field).

    NOTE(review): ``data_samples``, ``memory`` and ``itr`` are accepted but
    never used in this body.  A second function with this exact name is
    defined later in this file and will shadow this one at import time —
    confirm which definition is intended.
    """
    model.eval()
    # data_samples=torch.tensor(data_samples).float().cuda()
    # Sample from prior
    z_samples = torch.randn(30, 1).to(device)
    # linspace for plotting
    npts=500
    # z_samples is immediately overwritten below: an evenly spaced grid of
    # 100 points on [-4, 4] is used instead of the random prior draws above.
    z_samples = np.linspace(-4,4,100)
    z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
    znp = np.linspace(-4,4,npts)
    z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        # Log-density of both grids under the standard-normal base distribution.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
        logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            integration_times = torch.linspace(0, end_time, ntimes)

            def log_prob(t):
                # Integrate z from time t to end_time and apply the
                # instantaneous change-of-variables correction to recover
                # log p(z(t)) from the base log-density.
                z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
                z_traj = z_traj
                logp_z_traj = standard_normal_logprob(z_traj)
                dlogp_traj = dlogp_traj.cpu().numpy()
                return logp_z_traj.cpu().numpy() - dlogp_traj

            logp = []
            for t in integration_times:
                logp.append(log_prob(t))

            # The differential equation evaluated at some t and x.
            def _differential(t, x):
                t = torch.tensor(t).to(device)
                x = torch.tensor(x).to(device)
                return cnf.odefunc.odefunc.diffeq(t, x)

            # Sample the (negated) vector field on a (t, x) grid for the
            # streamplot overlay.
            ts = np.linspace(0,end_time,100)
            xs = np.linspace(-4,4,100)
            dxs = torch.zeros(ts.shape[0],xs.shape[0])
            for ti , t in enumerate(ts):
                for xi,x in enumerate(xs):
                    dxs[ti,xi]= -_differential(t,[x])
            dxs = torch.tensor(dxs)
            dts = torch.ones_like(dxs)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
            # Map the prior grid back through the flow (trajectories are
            # computed but only the cached `logp` values are plotted below).
            z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
    # z_traj = z_traj.cpu().numpy()
    # logp_traj= logp_traj.cpu().numpy()
    makedirs(savedir)
    # One frame per time step; the red horizontal line marks the current t.
    for timerow in range(integration_times.shape[0]):
        plt.clf()
        probs = np.exp(np.array(logp)[:,:,0])
        # Normalize each time-row to its own maximum for display contrast.
        maxs = np.amax(probs,axis=1,keepdims=True)
        probs = probs / maxs
        plt.rcParams.update({'font.size': 13})
        fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True,
                                 gridspec_kw={'height_ratios': [1,5]},
                                 figsize=(8,13))
        fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
        # Top panel: unnormalized density p(z(t)) at the current time step
        # (logp is time-reversed so index 0 is the data-space end).
        axes[0].scatter(znp,np.exp(np.array(logp)[::-1][timerow,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[::-1][timerow,:,0]),cmap='viridis')
        axes[0].set_xlim(-4,4)
        axes[0].set_ylim(0,.42)
        axes[0].set_ylabel(r"$p(z(t))$",labelpad=20)
        axes[0].set_yticks([])
        axes[0].get_xaxis().set_visible(False)
        axes[0].spines['top'].set_visible(False)
        axes[0].spines['right'].set_visible(False)
        axes[0].set_ylim(bottom=0.0)
        # Bottom panel: density heatmap over (z, t) with the vector field
        # overlaid; the red line marks the current time step.
        axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
        axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.7,0.5),arrowsize=0.8)
        axes[1].plot([-4,4],[integration_times[timerow],integration_times[timerow]],c='red',zorder=100)
        axes[1].set_xlim(-4,4)
        axes[1].set_yticks([0,0.5])
        axes[1].set_xticks([])
        axes[1].set_yticklabels([r"$0$",r"$1$"])
        axes[1].set_ylabel(r"$t$")
        axes[1].set_xlabel(r"$z$")
        axes[1].spines['top'].set_visible(False)
        axes[1].spines['bottom'].set_visible(False)
        axes[1].get_xaxis().set_visible(True)
        # Manually nudge the top panel upward; tight_layout alone leaves a gap.
        pos0 = axes[0].get_position(original=False)
        pos1 = axes[1].get_position(original=False)
        print(pos0.y0)
        print(pos1.y0+pos1.height)
        axes[0].set_position([pos1.x0,pos0.y0+0.15,pos1.x1,pos0.height])
        plt.savefig(os.path.join(savedir, "fig1_1d_scrub"+str(timerow)+".png"),pad_inches=0,bbox_inches='tight')
        plt.close()
def save_fig1_1d_NF(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',dpi=350):
    """Three-panel "figure 1" variant apparently intended for a discrete
    normalizing flow (planar flows) rather than a CNF.

    NOTE(review): this function looks like an unfinished adaptation.  The
    ``for cnf in model.chain`` loop has been commented out, so ``cnf``,
    ``end_time`` and ``integration_times`` are referenced below without
    ever being defined — calling this function as written raises
    ``NameError``.  ``data_samples``, ``memory`` and ``dpi`` are also
    unused.  Confirm the intended per-layer iteration before using.
    """
    model.eval()
    z_samples = torch.randn(30, 1).to(device)
    # linspace for plotting
    npts=500
    # z_samples is immediately overwritten with an evenly spaced grid.
    z_samples = np.linspace(-4,4,100)
    z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
    znp = np.linspace(-4,4,npts)
    z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
        logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
        t = 0
        # for planar in model.chain:
        # end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
        # integration_times = torch.linspace(0, end_time, ntimes)

        def log_prob(t):
            # NOTE(review): iterates the tail of the chain but still calls
            # the undefined name ``cnf`` with the undefined ``end_time``;
            # the loop variable ``planar`` is never used.
            for planar in model.chain[t:]:
                print(t)
                z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
                z_traj = z_traj
                logp_z_traj = standard_normal_logprob(z_traj)
                dlogp_traj = dlogp_traj.cpu().numpy()
            return logp_z_traj.cpu().numpy() - dlogp_traj

        logp = []
        # NOTE(review): ``integration_times`` is undefined here (see above).
        for t in integration_times:
            logp.append(log_prob(t))

        # The differential equation evaluated at some t and x.
        def _differential(t, x):
            t = torch.tensor(t).to(device)
            x = torch.tensor(x).to(device)
            return cnf.odefunc.odefunc.diffeq(t, x)

        # Sample the (negated) vector field on a (t, x) grid for streamplot.
        ts = np.linspace(0,end_time,100)
        xs = np.linspace(-4,4,100)
        dxs = torch.zeros(ts.shape[0],xs.shape[0])
        for ti , t in enumerate(ts):
            for xi,x in enumerate(xs):
                dxs[ti,xi]= -_differential(t,[x])
        dxs = torch.tensor(dxs)
        dts = torch.ones_like(dxs)
        # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
        z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
        # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
    # z_traj = z_traj.cpu().numpy()
    # logp_traj= logp_traj.cpu().numpy()
    makedirs(savedir)
    plt.clf()
    # plt.imshow(logp_traj[:,:,0],cmap='plasma')
    # plt.imshow(np.exp(np.array(logp)[:,:,0]),cmap='plasma',extent=[-4,4,0,1])
    # plt.tight_layout()
    # plt.savefig(os.path.join(savedir, "fig1_1d.jpg"))
    # plt.clf()
    # plt.plot(z_traj[:,:,0].cpu().numpy())
    # plt.savefig(os.path.join(savedir, "fig1_1d_traj.jpg"))
    # nm = matplotlib.colors.Normalize(0.05,0.45,True)
    probs = np.exp(np.array(logp)[:,:,0])
    # Normalize each time-row to its own maximum for display contrast.
    maxs = np.amax(probs,axis=1,keepdims=True)
    probs = probs / maxs
    # plt.clf()
    # plt.axis('off')
    # plt.tight_layout()
    # plt.imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=40.)
    # plt.streamplot(xs,ts,dxs,dts,color='white',linewidth=0.7,density=(0.5,2.))
    # plt.savefig(os.path.join(savedir, "fig1_1d_stream.pdf"),pad_inches=0,bbox_inches='tight')
    # plt.clf()
    # plt.axis('off')
    # plt.tight_layout()
    # plt.plot(np.exp(np.array(logp)[0,:,0]))
    # plt.savefig(os.path.join(savedir, "fig1_1d_t1.pdf"),pad_inches=0,bbox_inches='tight')
    # plt.clf()
    # plt.axis('off')
    # plt.tight_layout()
    # plt.plot(np.exp(np.array(logp)[-1,:,0]))
    # plt.savefig(os.path.join(savedir, "fig1_1d_t0.pdf"),pad_inches=0,bbox_inches='tight')
    # plt.clf()
    # plt.axis('off')
    # plt.tight_layout()
    # plt.subplot2grid((8,1),(0,0))
    # plt.axis('off')
    # plt.tight_layout()
    # plt.plot(znp,np.exp(np.array(logp)[-1,:,0]))
    # plt.subplot2grid((8,1),(1,0),rowspan=6)
    # plt.axis('off')
    # plt.tight_layout()
    # plt.imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=30.)
    # plt.streamplot(xs,ts,dxs,dts,color='white',linewidth=0.7,density=(0.5,2.))
    # plt.subplot2grid((8,1),(7,0))
    # plt.axis('off')
    # plt.tight_layout()
    # plt.plot(znp,np.exp(np.array(logp)[0,:,0]))
    plt.rcParams.update({'font.size': 13})
    # Three stacked panels: base density, density-over-time heatmap with
    # streamlines, and data-space density.
    fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
                             gridspec_kw={'height_ratios': [1,5, 1]},
                             figsize=(4, 7))
    fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
    # axes[1].set_aspect(30, share=True)
    # plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
    # axes[0].plot(znp,np.exp(np.array(logp)[0,:,0]))
    axes[0].scatter(znp,np.exp(np.array(logp)[0,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[0,:,0]),cmap='viridis')
    axes[0].set_xlim(-4,4)
    axes[0].set_ylabel(r"$p(z(t_1))$",labelpad=20)
    axes[0].set_yticks([])
    axes[0].get_xaxis().set_visible(False)
    axes[0].spines['top'].set_visible(False)
    axes[0].spines['right'].set_visible(False)
    # axes[0].spines['bottom'].set_visible(False)
    # axes[0].axis('off')
    axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
    axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.4,2.5))
    # axes[1].set_axis_off()
    axes[1].set_xlim(-4,4)
    # axes[1].axis('off')
    axes[1].set_yticks([0,0.5])
    axes[1].set_yticklabels([r"$0$",r"$1$"])
    axes[1].set_ylabel(r"$t$")
    axes[1].get_xaxis().set_visible(False)
    # axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
    axes[2].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[-1,:,0]),cmap='viridis')
    axes[2].set_xlim(-4,4)
    axes[2].set_ylabel(r"$p(z(t_0))$",labelpad=20)
    axes[2].set_xlabel(r"$z$")
    axes[2].set_yticks([])
    axes[2].set_xticks([])
    # axes[2].get_xaxis().set_visible(False)
    axes[2].spines['top'].set_visible(False)
    axes[2].spines['right'].set_visible(False)
    # fig.subplots_adjust(hspace=0.)
    # Manually reposition the outer panels to hug the heatmap.
    pos0 = axes[0].get_position(original=False)
    pos1 = axes[1].get_position(original=False)
    pos2 = axes[2].get_position(original=False)
    print(pos0.y0)
    print(pos1.y0+pos1.height)
    axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
    axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
    plt.savefig(os.path.join(savedir, "fig1_1d_together.pdf"),pad_inches=0,bbox_inches='tight')
# if __name__ == '__main__':
# plt.figure(figsize=(8, 8))
# for _ in range(z_traj.shape[0]):
# plt.clf()
# # plot target potential function
# ax = plt.subplot(2, 2, 1, aspect="equal")
# ax.hist2d(data_samples[:, 0], data_samples[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
# ax.invert_yaxis()
# ax.get_xaxis().set_ticks([])
# ax.get_yaxis().set_ticks([])
# ax.set_title("Target", fontsize=32)
# # plot the density
# ax = plt.subplot(2, 2, 2, aspect="equal")
# z, logqz = grid_z_traj[t], grid_logpz_traj[t]
# xx = z[:, 0].reshape(npts, npts)
# yy = z[:, 1].reshape(npts, npts)
# qz = np.exp(logqz).reshape(npts, npts)
# plt.pcolormesh(xx, yy, qz)
# ax.set_xlim(-4, 4)
# ax.set_ylim(-4, 4)
# cmap = matplotlib.cm.get_cmap(None)
# ax.set_axis_bgcolor(cmap(0.))
# ax.invert_yaxis()
# ax.get_xaxis().set_ticks([])
# ax.get_yaxis().set_ticks([])
# ax.set_title("Density", fontsize=32)
# # plot the samples
# ax = plt.subplot(2, 2, 3, aspect="equal")
# zk = z_traj[t]
# ax.hist2d(zk[:, 0], zk[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
# ax.invert_yaxis()
# ax.get_xaxis().set_ticks([])
# ax.get_yaxis().set_ticks([])
# ax.set_title("Samples", fontsize=32)
# # plot vector field
# ax = plt.subplot(2, 2, 4, aspect="equal")
# K = 13j
# y, x = np.mgrid[-4:4:K, -4:4:K]
# K = int(K.imag)
# zs = torch.from_numpy(np.stack([x, y], -1).reshape(K * K, 2)).to(device, torch.float32)
# logps = torch.zeros(zs.shape[0], 1).to(device, torch.float32)
# dydt = cnf.odefunc(integration_times[t], (zs, logps))[0]
# dydt = -dydt.cpu().numpy()
# dydt = dydt.reshape(K, K, 2)
# logmag = 2 * np.log(np.hypot(dydt[:, :, 0], dydt[:, :, 1]))
# ax.quiver(
# x, y, dydt[:, :, 0], dydt[:, :, 1],
# np.exp(logmag), cmap="coolwarm", scale=20., width=0.015, pivot="mid"
# )
# ax.set_xlim(-4, 4)
# ax.set_ylim(-4, 4)
# ax.axis("off")
# ax.set_title("Vector Field", fontsize=32)
# makedirs(savedir)
# plt.savefig(os.path.join(savedir, f"viz-{t:05d}.jpg"))
# t += 1
# def trajectory_to_video(savedir):
# import subprocess
# bashCommand = 'ffmpeg -y -i {} {}'.format(os.path.join(savedir, 'viz-%05d.jpg'), os.path.join(savedir, 'traj.mp4'))
# process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
# output, error = process.communicate()
# if __name__ == '__main__':
# import argparse
# import sys
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')))
# import lib.toy_data as toy_data
# from train_misc import count_parameters
# from train_misc import set_cnf_options, add_spectral_norm, create_regularization_fns
# from train_misc import build_model_toy2d
# def get_ckpt_model_and_data(args):
# # Load checkpoint.
# checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
# ckpt_args = checkpt['args']
# state_dict = checkpt['state_dict']
# # Construct model and restore checkpoint.
# regularization_fns, regularization_coeffs = create_regularization_fns(ckpt_args)
# model = build_model_toy2d(ckpt_args, regularization_fns).to(device)
# if ckpt_args.spectral_norm: add_spectral_norm(model)
# set_cnf_options(ckpt_args, model)
# model.load_state_dict(state_dict)
# model.to(device)
# print(model)
# print("Number of trainable parameters: {}".format(count_parameters(model)))
# # Load samples from dataset
# data_samples = toy_data.inf_train_gen(ckpt_args.data, batch_size=2000)
# return model, data_samples
# parser = argparse.ArgumentParser()
# parser.add_argument('--checkpt', type=str, required=True)
# parser.add_argument('--ntimes', type=int, default=101)
# parser.add_argument('--memory', type=float, default=0.01, help='Higher this number, the more memory is consumed.')
# parser.add_argument('--save', type=str, default='trajectory')
# args = parser.parse_args()
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# model, data_samples = get_ckpt_model_and_data(args)
# save_trajectory(model, data_samples, args.save, ntimes=args.ntimes, memory=args.memory, device=device)
# trajectory_to_video(args.save)
def save_fig1_1d_icml(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
    """Produce the three-panel ICML "figure 1" for a 1D CNF, optionally as
    an animation over integration time.

    Top panel: base density p(z_t); middle: density-over-time heatmap with
    the flow's vector field; bottom: data-space density p(z_{t_0}).  For
    each sample index in a hard-coded list, per-frame PNGs are written to
    ``savedir/anim/<smple>/`` (when ``animate`` is True) plus a summary
    PNG ``fig1_1d_together<smple>.png``.

    NOTE(review): ``data_samples``, ``memory`` and ``itr`` are accepted
    but never used.  ``.numpy()`` calls below assume CPU tensors — confirm
    when running with a CUDA device.
    """
    model.eval()
    # data_samples=torch.tensor(data_samples).float().cuda()
    # Sample from prior
    z_samples = torch.randn(30, 1).to(device)
    # linspace for plotting
    npts=500
    # z_samples is immediately overwritten with an evenly spaced grid.
    z_samples = np.linspace(-4,4,100)
    z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
    znp = np.linspace(-4,4,npts)
    z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
        logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            # Start slightly above 0 so the first integration interval
            # [0, t] is non-degenerate.
            integration_times = torch.linspace(0.0001, end_time, ntimes)

            def log_prob(t):
                #z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
                # Integrate forward from 0 to t and apply the
                # change-of-variables correction to the base log-density.
                z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([0,t]),reverse = False)
                z_traj = z_traj
                logp_z_traj = standard_normal_logprob(z_traj)
                dlogp_traj = dlogp_traj.cpu().numpy()
                return logp_z_traj.cpu().numpy() - dlogp_traj

            logp = []
            for t in integration_times:
                logp.append(log_prob(t))

            # The differential equation evaluated at some t and x.
            def _differential(t, x):
                t = torch.tensor(t).to(device)
                x = torch.tensor(x).to(device)
                return cnf.odefunc.diffeq(t, x)

            # Sample the (negated) vector field on a (t, x) grid.
            ts = np.linspace(0.0001,end_time,101)
            xs = np.linspace(-4,4,100)
            dxs = torch.zeros(ts.shape[0],xs.shape[0])
            for ti , t in enumerate(ts):
                for xi,x in enumerate(xs):
                    dxs[ti,xi]= -_differential(t,[x])
            dxs = torch.tensor(dxs)
            dts = torch.ones_like(dxs)
            # Forward trajectories of the plotting grid through the flow.
            z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
            #z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
    #z_traj = z_traj.cpu().numpy()
    #logp_traj= logp_traj.cpu().numpy()
    makedirs(savedir)
    plt.clf()
    plt.rcParams.update({'font.size': 13})
    # This pre-loop figure is superseded by the one created per frame below.
    fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
                             gridspec_kw={'height_ratios': [1,5, 1]},
                             figsize=(4, 8))
    fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
    probs = np.exp(np.array(logp)[:,:,0])
    # Reverse time so row 0 of the heatmap is the data-space end.
    probs = probs[::-1]
    maxs = np.amax(probs,axis=1,keepdims=True)
    probs = probs
    # probs = probs / maxs
    # for smple in range(len(z_samples)):
    # Hand-picked sample indices that make for visually clear trajectories.
    for smple in [38,50,55, 59]:
        animate = True
        if animate:
            Trange = range(len(integration_times))
        else:
            Trange = [len(integration_times)-1]
        for T in Trange:
            plt.clf()
            plt.close()
            plt.rcParams.update({'font.size': 13})
            fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
                                     gridspec_kw={'height_ratios': [1,5, 1]},
                                     figsize=(4, 7))
            fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
            print("plotting ",T)
            # Tracked sample position/density at t=0 and at frame T.
            ztr0 = z_traj.numpy()[0,smple,0]
            pztr0 = np.exp(np.array(logp_traj)[0,smple,0])
            ztrT = z_traj.numpy()[T,smple,0]
            pztrT = np.exp(np.array(logp_traj)[T,smple,0])
            probts = np.exp(np.array(logp))[::-1]
            # axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
            # Top panel: density at the final (base) time.
            sc0 = axes[0].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=probs[-1,:],cmap='viridis')
            #axes[0].scatter([ztr0],[0.],color="#F012BE",s=10.,zorder=5,clip_on=False)
            #axes[0].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.4,linewidth=0.5,zorder=2)
            #axes[0].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.4,linewidth=0.5,clip_on=False,zorder=2)
            #axes[0].plot([ztrT, ztrT],[0.,pztrT], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
            #axes[0].plot([ztrT, znp[0]],[pztrT,pztrT], color="#2ECC40",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
            #axes[0].plot([znp[0], znp[0]],[pztr0,pztrT], color="#2ECC40",alpha=1.0,linewidth=1,zorder=10,clip_on=False)
            axes[0].set_xlim(-4,4)
            axes[0].set_ylim(0.,maxs[-1][0])
            axes[0].set_ylabel(r"$p(z_{t})$",labelpad=20)
            # Single tick labeled Delta marking the density change of the
            # tracked sample.
            axes[0].set_yticks([min(0.9*pztr0,(pztrT+pztr0)/2)])
            axes[0].set_yticklabels([r"$\Delta$"], color="#2ECC40")
            axes[0].tick_params(width=0,labelsize=10)
            axes[0].get_xaxis().set_visible(False)
            axes[0].spines['top'].set_visible(False)
            axes[0].spines['right'].set_visible(False)
            # axes[0].set_clip_on(False)
            axes[0].set_zorder(2)
            # Middle panel: density-over-time heatmap with streamlines.
            axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
            #axes[1].plot(z_traj[0:T,smple,0].numpy(),ts[0:T], color="#F012BE")
            #axes[1].scatter([ztr0],[0.], color="#F012BE", s=10.,zorder=5)
            #axes[1].scatter([ztrT],[ts[T]], color="#F012BE",s=10.,zorder=5,clip_on=True)
            #axes[1].scatter([ztrT],[ts[-1]], color="#F012BE",s=10.,zorder=5,clip_on=True,visible=False)#hack
            axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
            #axes[1].streamplot(xs,ts[::-1],dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
            axes[1].set_xlim(-4,4)
            axes[1].set_yticks([0,0.5])
            axes[1].set_yticklabels([r"$0$",r"$1$"])
            axes[1].set_ylabel(r"$t$")
            # axes[1].get_xaxis().set_visible(False)
            # Pin the spines to the heatmap's data extent (t in [0, 0.5]).
            axes[1].spines['bottom'].set_position('zero')
            # axes[1].spines['bottom'].set_zorder(0.)
            axes[1].spines['top'].set_position(('data',0.5))
            # axes[1].spines['top'].set_zorder(0.)
            axes[1].spines['left'].set_bounds(0.,0.5)
            axes[1].spines['right'].set_bounds(0.,0.5)
            axes[1].set_clip_on(True)
            # Bottom panel: density at the current frame's time.
            sc2 = axes[2].scatter(znp,probts[T,:,0],s=0.5,marker=None,linestyle='-',c=probts[T,:,0],cmap='viridis',zorder=3)
            sc2.set_clim(0.,maxs[-1][0])
            #axes[2].scatter(ztrT,[0.], color="#F012BE",s=10.,zorder=5,clip_on=False)
            #axes[2].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5)
            #axes[2].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.8,linewidth=0.5,clip_on=False)
            axes[2].set_xlim(-4,4)
            axes[2].set_ylabel(r"$p(z_{t_0})$",labelpad=20)
            axes[2].set_xlabel(r"$z$")
            axes[2].set_yticks([])
            axes[2].set_xticks([])
            # axes[2].get_xaxis().set_visible(False)
            axes[2].spines['top'].set_visible(False)
            axes[2].spines['right'].set_visible(False)
            axes[2].set_ylim(bottom=0.)
            # fig.subplots_adjust(hspace=0.)
            # Manually reposition the outer panels to hug the heatmap.
            pos0 = axes[0].get_position(original=False)
            pos1 = axes[1].get_position(original=False)
            pos2 = axes[2].get_position(original=False)
            # print(pos0.y0)
            # print(pos1.y0+pos1.height)
            axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
            axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
            if animate:
                makedirs(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple))))
                plt.savefig(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple)),"img-"+'{:0>4}'.format(str(T))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=300)
            # Summary image (overwritten each frame; the last frame wins).
            plt.savefig(os.path.join(savedir, "fig1_1d_together"+'{:0>4}'.format(str(smple))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=350)
#;
# ffmpeg -r 24 -i experiments/fig1_1d_toy/fig1_ani/anim/0038/%04.png -c:v libx264 -crf 20 -pix_fmt yuv420p experiments/fig1_1d_toy/fig1_ani/animate.mp4
# ffmpeg -r 24 -i %03.png -c:v libx264 -crf 20 -pix_fmt yuv420p animate.mp4
# ffmpeg -f concat -safe 0 -i anim-list -r 24 -c:v libx264 -crf 20 -pix_fmt yuv420p ffjord-sample-rev.mp4
def save_fig1_1d_icml_no_top_or_bottom(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
    """Stripped-down variant of ``save_fig1_1d_icml`` that renders only the
    middle density-over-time heatmap (the top/bottom density panels are
    commented out), one frame per integration time per sample index.

    Frames go to ``savedir/anim/<smple>/`` plus a summary PNG.  Plots use
    the implicit pyplot figure rather than explicit axes.

    NOTE(review): ``data_samples``, ``memory`` and ``itr`` are accepted
    but never used.  ``ztr0``/``pztr0``/``ztrT``/``pztrT``/``probts`` are
    still computed each frame but only used by the commented-out panels.
    """
    model.eval()
    # data_samples=torch.tensor(data_samples).float().cuda()
    # Sample from prior
    z_samples = torch.randn(30, 1).to(device)
    # linspace for plotting
    npts=500
    # z_samples is immediately overwritten with an evenly spaced grid.
    z_samples = np.linspace(-4,4,100)
    z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
    znp = np.linspace(-4,4,npts)
    z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
        logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            # Start slightly above 0 so the first interval is non-degenerate.
            integration_times = torch.linspace(0.0001, end_time, ntimes)

            def log_prob(t):
                #z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
                # Integrate forward from 0 to t and apply the
                # change-of-variables correction to the base log-density.
                z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([0,t]),reverse = False)
                z_traj = z_traj
                logp_z_traj = standard_normal_logprob(z_traj)
                dlogp_traj = dlogp_traj.cpu().numpy()
                return logp_z_traj.cpu().numpy() - dlogp_traj

            logp = []
            for t in integration_times:
                logp.append(log_prob(t))

            # The differential equation evaluated at some t and x.
            def _differential(t, x):
                t = torch.tensor(t).to(device)
                x = torch.tensor(x).to(device)
                return cnf.odefunc.diffeq(t, x)

            # Sample the (negated) vector field on a (t, x) grid.
            ts = np.linspace(0.0001,end_time,101)
            xs = np.linspace(-4,4,100)
            dxs = torch.zeros(ts.shape[0],xs.shape[0])
            for ti , t in enumerate(ts):
                for xi,x in enumerate(xs):
                    dxs[ti,xi]= -_differential(t,[x])
            dxs = torch.tensor(dxs)
            dts = torch.ones_like(dxs)
            # Forward trajectories of the plotting grid through the flow.
            z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
            #z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
    #z_traj = z_traj.cpu().numpy()
    #logp_traj= logp_traj.cpu().numpy()
    makedirs(savedir)
    plt.clf()
    plt.rcParams.update({'font.size': 13})
    # This pre-loop figure is superseded by the per-frame pyplot state below.
    fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
                             gridspec_kw={'height_ratios': [1,5, 1]},
                             figsize=(4, 8))
    fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
    probs = np.exp(np.array(logp)[:,:,0])
    # Reverse time so row 0 of the heatmap is the data-space end.
    probs = probs[::-1]
    maxs = np.amax(probs,axis=1,keepdims=True)
    probs = probs
    # probs = probs / maxs
    # for smple in range(len(z_samples)):
    # Hand-picked sample indices that make for visually clear trajectories.
    for smple in [38,50,55, 59]:
        animate = True
        if animate:
            Trange = range(len(integration_times))
        else:
            Trange = [len(integration_times)-1]
        for T in Trange:
            plt.clf()
            plt.close()
            plt.rcParams.update({'font.size': 13})
            #fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True,
            #                         gridspec_kw={'height_ratios': [5000, 1]},
            #                         figsize=(4, 7))
            #fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
            print("plotting ",T)
            # Tracked sample state (only used by the commented-out panels).
            ztr0 = z_traj.numpy()[0,smple,0]
            pztr0 = np.exp(np.array(logp_traj)[0,smple,0])
            ztrT = z_traj.numpy()[T,smple,0]
            pztrT = np.exp(np.array(logp_traj)[T,smple,0])
            probts = np.exp(np.array(logp))[::-1]
            ## axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
            #sc0 = axes[0].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=probs[-1,:],cmap='viridis')
            ##axes[0].scatter([ztr0],[0.],color="#F012BE",s=10.,zorder=5,clip_on=False)
            ##axes[0].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.4,linewidth=0.5,zorder=2)
            ##axes[0].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.4,linewidth=0.5,clip_on=False,zorder=2)
            ##axes[0].plot([ztrT, ztrT],[0.,pztrT], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
            ##axes[0].plot([ztrT, znp[0]],[pztrT,pztrT], color="#2ECC40",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
            ##axes[0].plot([znp[0], znp[0]],[pztr0,pztrT], color="#2ECC40",alpha=1.0,linewidth=1,zorder=10,clip_on=False)
            #axes[0].set_xlim(-4,4)
            #axes[0].set_ylim(0.,maxs[-1][0])
            #axes[0].set_ylabel(r"$p(z_{t})$",labelpad=20)
            #axes[0].set_yticks([min(0.9*pztr0,(pztrT+pztr0)/2)])
            #axes[0].set_yticklabels([r"$\Delta$"], color="#2ECC40")
            #axes[0].tick_params(width=0,labelsize=10)
            #axes[0].get_xaxis().set_visible(False)
            #axes[0].spines['top'].set_visible(False)
            #axes[0].spines['right'].set_visible(False)
            ## axes[0].set_clip_on(False)
            #axes[0].set_zorder(2)
            # Heatmap of the density over (z, t) with streamlines overlaid.
            plt.imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
            #axes[1].plot(z_traj[0:T,smple,0].numpy(),ts[0:T], color="#F012BE")
            #axes[1].scatter([ztr0],[0.], color="#F012BE", s=10.,zorder=5)
            #axes[1].scatter([ztrT],[ts[T]], color="#F012BE",s=10.,zorder=5,clip_on=True)
            #axes[1].scatter([ztrT],[ts[-1]], color="#F012BE",s=10.,zorder=5,clip_on=True,visible=False)#hack
            plt.streamplot(xs,ts,dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
            #axes[1].streamplot(xs,ts[::-1],dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
            #plt.set_xlim(-4,4)
            #plt.set_yticks([0,0.5])
            #plt.set_yticklabels([r"$0$",r"$1$"])
            #plt.set_ylabel(r"$t$")
            # axes[1].get_xaxis().set_visible(False)
            #plt.spines['bottom'].set_position('zero')
            ## axes[1].spines['bottom'].set_zorder(0.)
            #plt.spines['top'].set_position(('data',0.5))
            ## axes[1].spines['top'].set_zorder(0.)
            #plt.spines['left'].set_bounds(0.,0.5)
            #plt.spines['right'].set_bounds(0.,0.5)
            #plt.set_clip_on(True)
            #sc2 = axes[2].scatter(znp,probts[T,:,0],s=0.5,marker=None,linestyle='-',c=probts[T,:,0],cmap='viridis',zorder=3)
            #sc2.set_clim(0.,maxs[-1][0])
            ##axes[2].scatter(ztrT,[0.], color="#F012BE",s=10.,zorder=5,clip_on=False)
            ##axes[2].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5)
            ##axes[2].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.8,linewidth=0.5,clip_on=False)
            #axes[2].set_xlim(-4,4)
            #axes[2].set_ylabel(r"$p(z_{t_0})$",labelpad=20)
            #axes[2].set_xlabel(r"$z$")
            #axes[2].set_yticks([])
            #axes[2].set_xticks([])
            ## axes[2].get_xaxis().set_visible(False)
            #axes[2].spines['top'].set_visible(False)
            #axes[2].spines['right'].set_visible(False)
            #axes[2].set_ylim(bottom=0.)
            ## fig.subplots_adjust(hspace=0.)
            #pos0 = axes[0].get_position(original=False)
            #pos1 = axes[1].get_position(original=False)
            #pos2 = axes[2].get_position(original=False)
            ## print(pos0.y0)
            ## print(pos1.y0+pos1.height)
            #axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
            #axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
            if animate:
                makedirs(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple))))
                plt.savefig(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple)),"img-"+'{:0>4}'.format(str(T))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=300)
            # Summary image (overwritten each frame; the last frame wins).
            plt.savefig(os.path.join(savedir, "fig1_1d_together"+'{:0>4}'.format(str(smple))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=350)
def save_fig1_1d_icml_rev(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
    """Reverse-direction variant of ``save_fig1_1d_icml``: trajectories are
    sampled from the prior grid backwards through the flow and time is
    flipped, tracing a sample from the base distribution toward data space.

    For each sample index in a hard-coded list, per-frame PNGs are written
    to ``savedir/anim_rev/<smple>/`` plus a summary PNG
    ``fig1_1d_together_rev<smple>.png``.

    NOTE(review): ``data_samples``, ``memory`` and ``itr`` are accepted
    but never used.  ``.numpy()`` calls below assume CPU tensors — confirm
    when running with a CUDA device.
    """
    model.eval()
    # data_samples=torch.tensor(data_samples).float().cuda()
    # Sample from prior
    z_samples = torch.randn(30, 1).to(device)
    # linspace for plotting
    npts=500
    # z_samples is immediately overwritten with an evenly spaced grid.
    z_samples = np.linspace(-4,4,100)
    z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
    znp = np.linspace(-4,4,npts)
    z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
        logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            integration_times = torch.linspace(0, end_time, ntimes)

            def log_prob(t):
                # Integrate from t to end_time and apply the
                # change-of-variables correction to the base log-density.
                z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
                z_traj = z_traj
                logp_z_traj = standard_normal_logprob(z_traj)
                dlogp_traj = dlogp_traj.cpu().numpy()
                return logp_z_traj.cpu().numpy() - dlogp_traj

            logp = []
            for t in integration_times:
                logp.append(log_prob(t))

            # The differential equation evaluated at some t and x.
            def _differential(t, x):
                t = torch.tensor(t).to(device)
                x = torch.tensor(x).to(device)
                #return cnf.odefunc.odefunc.diffeq(t, x)
                return cnf.odefunc.diffeq(t, x)

            # Sample the (negated) vector field on a (t, x) grid.
            ts = np.linspace(0,end_time,101)
            xs = np.linspace(-4,4,100)
            dxs = torch.zeros(ts.shape[0],xs.shape[0])
            for ti , t in enumerate(ts):
                for xi,x in enumerate(xs):
                    dxs[ti,xi]= -_differential(t,[x])
            dxs = torch.tensor(dxs)
            dts = torch.ones_like(dxs)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
            # Map the prior grid backwards through the flow, then flip the
            # time axis so index 0 corresponds to the base end.
            z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            z_traj = z_traj.numpy()[::-1,:,:]
            logp_traj = logp_traj[::-1,:,:]
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
    # z_traj = z_traj.cpu().numpy()
    # logp_traj= logp_traj.cpu().numpy()
    makedirs(savedir)
    plt.clf()
    plt.rcParams.update({'font.size': 13})
    # This pre-loop figure is superseded by the one created per frame below.
    fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
                             gridspec_kw={'height_ratios': [1,5, 1]},
                             figsize=(4, 8))
    fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
    probs = np.exp(np.array(logp)[:,:,0])
    maxs = np.amax(probs,axis=1,keepdims=True)
    # Reverse time so row 0 of the heatmap is the data-space end.
    probs = probs[::-1]
    # probs = probs / maxs
    # for smple in range(len(z_samples)):
    # Hand-picked sample indices that make for visually clear trajectories.
    for smple in [38,50,55, 59]:
        animate = True
        if animate:
            Trange = range(len(integration_times))
        else:
            Trange = [len(integration_times)-1]
        for T in Trange:
            plt.clf()
            plt.close()
            plt.rcParams.update({'font.size': 13})
            fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
                                     gridspec_kw={'height_ratios': [1,5, 1]},
                                     figsize=(4, 7))
            fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
            print("plotting ",T)
            # Tracked sample position/density at the start and at frame T.
            ztr0 = z_traj[0,smple,0]
            pztr0 = np.exp(np.array(logp_traj)[0,smple,0])
            ztrT = z_traj[T,smple,0]
            pztrT = np.exp(np.array(logp_traj)[T,smple,0])
            probts = np.exp(np.array(logp))
            # Top panel: density at the current frame's time, with the
            # tracked sample and its density change annotated.
            sc0 = axes[0].scatter(znp,probts[T,:,0],s=0.5,marker=None,linestyle='-',c=probts[T,:,0],cmap='viridis',zorder=3)
            sc0.set_clim(0.,maxs[-1][0])
            axes[0].scatter(ztrT,[0.], color="#F012BE",s=10.,zorder=5,clip_on=False)
            axes[0].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.4,linewidth=0.5,zorder=2)
            axes[0].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.4,linewidth=0.5,clip_on=False,zorder=2)
            axes[0].plot([ztrT, ztrT],[0.,pztrT], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
            axes[0].plot([ztrT, znp[0]],[pztrT,pztrT], color="#2ECC40",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
            axes[0].plot([znp[0], znp[0]],[pztr0,pztrT], color="#2ECC40",alpha=1.0,linewidth=1,zorder=10,clip_on=False)
            axes[0].set_xlim(-4,4)
            axes[0].set_ylim(0.,maxs[-1][0])
            axes[0].set_ylabel(r"$p(z_{t})$",labelpad=20)
            # Single tick labeled Delta marking the density change.
            axes[0].set_yticks([min(0.9*pztr0,(pztrT+pztr0)/2)])
            axes[0].set_yticklabels([r"$\Delta$"], color="#2ECC40")
            axes[0].tick_params(width=0,labelsize=10)
            axes[0].get_xaxis().set_visible(False)
            axes[0].spines['top'].set_visible(False)
            axes[0].spines['right'].set_visible(False)
            # axes[0].set_clip_on(False)
            axes[0].set_zorder(2)
            # Middle panel: heatmap with the sample's partial trajectory
            # drawn up to frame T; streamlines are flipped to match.
            axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
            axes[1].plot(z_traj[0:T,smple,0],ts[0:T], color="#F012BE")
            axes[1].scatter([ztr0],[0.], color="#F012BE", s=10.,zorder=5)
            axes[1].scatter([ztrT],[ts[T]], color="#F012BE",s=10.,zorder=5,clip_on=True)
            axes[1].scatter([ztrT],[ts[-1]], color="#F012BE",s=10.,zorder=5,clip_on=True,visible=False)#hack
            axes[1].streamplot(xs,ts,dxs.numpy()[:,::-1],dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
            axes[1].set_xlim(-4,4)
            axes[1].set_yticks([0,0.5])
            axes[1].set_yticklabels([r"$0$",r"$1$"])
            axes[1].set_ylabel(r"$t$")
            # axes[1].get_xaxis().set_visible(False)
            # Pin the spines to the heatmap's data extent (t in [0, 0.5]).
            axes[1].spines['bottom'].set_position('zero')
            # axes[1].spines['bottom'].set_zorder(0.)
            axes[1].spines['top'].set_position(('data',0.5))
            # axes[1].spines['top'].set_zorder(0.)
            axes[1].spines['left'].set_bounds(0.,0.5)
            axes[1].spines['right'].set_bounds(0.,0.5)
            axes[1].set_clip_on(True)
            # axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
            # Bottom panel: density at the final time with the sample's
            # starting point annotated.
            sc2 =axes[2].scatter(znp,probs[-1,:],s=0.5,marker=None,linestyle='-',c=probs[-1,:],cmap='viridis')
            sc2.set_clim(0.,maxs[-1][0])
            axes[2].scatter([ztr0],[0.],color="#F012BE",s=10.,zorder=5,clip_on=False)
            axes[2].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5)
            axes[2].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.8,linewidth=0.5,clip_on=False)
            axes[2].set_xlim(-4,4)
            axes[2].set_ylabel(r"$p(z_{t_T})$",labelpad=20)
            axes[2].set_xlabel(r"$z$")
            axes[2].set_yticks([])
            axes[2].set_xticks([])
            # axes[2].get_xaxis().set_visible(False)
            axes[2].spines['top'].set_visible(False)
            axes[2].spines['right'].set_visible(False)
            axes[2].set_ylim(0.,maxs[-1][0])
            # fig.subplots_adjust(hspace=0.)
            # Manually reposition the outer panels to hug the heatmap.
            pos0 = axes[0].get_position(original=False)
            pos1 = axes[1].get_position(original=False)
            pos2 = axes[2].get_position(original=False)
            # print(pos0.y0)
            # print(pos1.y0+pos1.height)
            axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
            axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
            if animate:
                makedirs(os.path.join(savedir, "anim_rev",'{:0>4}'.format(str(smple))))
                plt.savefig(os.path.join(savedir, "anim_rev",'{:0>4}'.format(str(smple)),"img-"+'{:0>4}'.format(str(T))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=300)
            # Summary image (overwritten each frame; the last frame wins).
            plt.savefig(os.path.join(savedir, "fig1_1d_together_rev"+'{:0>4}'.format(str(smple))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=350)
def save_fig1_1d_ptd_timescrub(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
    """Render one frame per integration time of a 1-D CNF: the density p(z(t)) at that
    time (top panel) plus a heat map of the full density evolution with flow streamlines
    and a red scrub line marking the current time (bottom panel).

    Frames are written to ``savedir/fig1_1d_scrub<t>.png``; ``data_samples`` and ``itr``
    are currently unused. NOTE(review): assumes ``model`` is a SequentialFlow of CNF
    layers exposing ``chain``, ``sqrt_end_time`` and ``odefunc.odefunc.diffeq``.
    """
    model.eval()
    # data_samples=torch.tensor(data_samples).float().cuda()
    # Sample from prior
    z_samples = torch.randn(30, 1).to(device)
    # linspace for plotting
    npts=500
    # NOTE(review): the random prior samples above are immediately overwritten by an
    # evenly spaced grid; trajectories plotted below start from this grid.
    z_samples = np.linspace(-4,4,100)
    z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
    znp = np.linspace(-4,4,npts)
    z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
    with torch.no_grad():
        # # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
        logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
        t = 0
        for cnf in model.chain:
            end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
            integration_times = torch.linspace(0, end_time, ntimes)
            # Density of the model at intermediate time t: integrate from t to end_time
            # and apply the change-of-variables correction against the base normal.
            def log_prob(t):
                z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
                z_traj = z_traj
                logp_z_traj = standard_normal_logprob(z_traj)
                dlogp_traj = dlogp_traj.cpu().numpy()
                return logp_z_traj.cpu().numpy() - dlogp_traj
            logp = []
            for t in integration_times:
                logp.append(log_prob(t))
            # The differential equation evaluated at some t and x.
            def _differential(t, x):
                t = torch.tensor(t).to(device)
                x = torch.tensor(x).to(device)
                return cnf.odefunc.odefunc.diffeq(t, x)
            # Sample the (negated) vector field on a t-x grid for the streamline plot.
            ts = np.linspace(0,end_time,100)
            xs = np.linspace(-4,4,100)
            dxs = torch.zeros(ts.shape[0],xs.shape[0])
            for ti , t in enumerate(ts):
                for xi,x in enumerate(xs):
                    dxs[ti,xi]= -_differential(t,[x])
            dxs = torch.tensor(dxs)
            dts = torch.ones_like(dxs)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
            z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            # z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
    # z_traj = z_traj.cpu().numpy()
    # logp_traj= logp_traj.cpu().numpy()
    makedirs(savedir)
    # One saved frame per integration time ("scrub" through the flow).
    for timerow in range(integration_times.shape[0]):
        plt.clf()
        # Per-time densities, normalized row-wise to [0, 1] for the heat map.
        probs = np.exp(np.array(logp)[:,:,0])
        maxs = np.amax(probs,axis=1,keepdims=True)
        probs = probs / maxs
        plt.rcParams.update({'font.size': 13})
        fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True,
                                 gridspec_kw={'height_ratios': [1,5]},
                                 figsize=(8,13))
        fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
        # Top panel: the (unnormalized-axis) density at the current scrub time.
        axes[0].scatter(znp,np.exp(np.array(logp)[::-1][timerow,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[::-1][timerow,:,0]),cmap='viridis')
        axes[0].set_xlim(-4,4)
        axes[0].set_ylim(0,.42)
        axes[0].set_ylabel(r"$p(z(t))$",labelpad=20)
        axes[0].set_yticks([])
        axes[0].get_xaxis().set_visible(False)
        axes[0].spines['top'].set_visible(False)
        axes[0].spines['right'].set_visible(False)
        axes[0].set_ylim(bottom=0.0)
        # Bottom panel: density evolution heat map with streamlines and scrub line.
        axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
        axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.7,0.5),arrowsize=0.8)
        axes[1].plot([-4,4],[integration_times[timerow],integration_times[timerow]],c='red',zorder=100)
        axes[1].set_xlim(-4,4)
        axes[1].set_yticks([0,0.5])
        axes[1].set_xticks([])
        axes[1].set_yticklabels([r"$0$",r"$1$"])
        axes[1].set_ylabel(r"$t$")
        axes[1].set_xlabel(r"$z$")
        axes[1].spines['top'].set_visible(False)
        axes[1].spines['bottom'].set_visible(False)
        axes[1].get_xaxis().set_visible(True)
        pos0 = axes[0].get_position(original=False)
        pos1 = axes[1].get_position(original=False)
        print(pos0.y0)
        print(pos1.y0+pos1.height)
        # Manually nudge the top panel toward the heat map.
        axes[0].set_position([pos1.x0,pos0.y0+0.15,pos1.x1,pos0.height])
        plt.savefig(os.path.join(savedir, "fig1_1d_scrub"+str(timerow)+".png"),pad_inches=0,bbox_inches='tight')
        plt.close()
# --- file: ffjord/diagnostics/approx_error_1d_particle_traj.py ---
from inspect import getsourcefile
import sys
import os
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import argparse
import os
import time
import torch
import torch.optim as optim
import lib.utils as utils
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import build_model_tabular
import seaborn as sns
sns.set_style("whitegrid")
colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple"]
sns.palplot(sns.xkcd_palette(colors))
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--ntimes', type=int, default=101)
parser.add_argument('--num_particles', type=int, default=10)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--save', type=str, default='experiments/approx_error_1d')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def normal_log_density(x, mean=0, stdev=1):
    """Pointwise log-density of a univariate Normal(mean, stdev**2) evaluated at x."""
    standardized = (x - mean) / stdev
    # log N(x; mu, s^2) = -0.5 * (log(2*pi) + 2*log(s) + ((x - mu)/s)^2)
    return -0.5 * (np.log(2 * np.pi) + 2 * np.log(stdev) + standardized * standardized)
def data_sample(batch_size):
    """Draw batch_size points from an equal-weight mixture of three 1-D Gaussians
    (means -2.8, -0.9, 2.0; common variance 0.4) as a (batch_size, 1) float tensor
    on the module-level device."""
    comp_a = np.random.randn(batch_size) * np.sqrt(0.4) - 2.8
    comp_b = np.random.randn(batch_size) * np.sqrt(0.4) - 0.9
    comp_c = np.random.randn(batch_size) * np.sqrt(0.4) + 2.
    stacked = np.concatenate([comp_a[:, None], comp_b[:, None], comp_c[:, None]], 1)
    # Pick one mixture component uniformly at random for each sample.
    component = np.random.randint(0, 3, batch_size)
    chosen = stacked[np.arange(batch_size), component]
    return torch.tensor(chosen[:, None]).float().to(device)
def data_density(x):
    """Exact log-density of the three-component Gaussian mixture used by data_sample."""
    log_p_a = normal_log_density(x, mean=-2.8, stdev=np.sqrt(0.4))
    log_p_b = normal_log_density(x, mean=-0.9, stdev=np.sqrt(0.4))
    log_p_c = normal_log_density(x, mean=2.0, stdev=np.sqrt(0.4))
    # Equal mixture weights of 1/3; combine in probability space.
    return torch.log(log_p_a.exp() / 3 + log_p_b.exp() / 3 + log_p_c.exp() / 3)
def model_density(x, model):
    """Log-density the flow model assigns to x: base normal log-prob at the mapped
    latent z minus the accumulated log-det change (change of variables)."""
    x = x.to(device)
    z, delta_logp = model(x, torch.zeros_like(x))
    return standard_normal_logprob(z) - delta_logp
def model_sample(model, batch_size):
    """Sample batch_size points by pushing standard-normal noise through the
    inverse flow; returns (samples, model log-density of the samples)."""
    noise = torch.randn(batch_size, 1)
    log_q_noise = standard_normal_logprob(noise)
    samples, log_q_samples = model(noise, log_q_noise, reverse=True)
    return samples, log_q_samples
def compute_loss(args, model, batch_size=None):
    """Monte-Carlo negative log-likelihood of the model on a fresh mixture batch."""
    if batch_size is None:
        batch_size = args.batch_size
    batch = data_sample(batch_size)
    log_px = model_density(batch, model)
    return -torch.mean(log_px)
def train():
    """Train a 1-D CNF on the Gaussian-mixture density by maximum likelihood.

    Logs running averages of loss, wall time, CNF internal time, and the number of
    ODE-solver function evaluations (NFE) for the forward and backward passes.
    Saves the best checkpoint (by test loss) to ``args.save/checkpt.pth`` and
    periodically plots model density vs. true density into ``args.save/figs``.
    """
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)
    logger.info(model)
    logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # Exponential running averages (decay 0.93) for log readability.
    time_meter = utils.RunningAverageMeter(0.93)
    loss_meter = utils.RunningAverageMeter(0.93)
    nfef_meter = utils.RunningAverageMeter(0.93)
    nfeb_meter = utils.RunningAverageMeter(0.93)
    tt_meter = utils.RunningAverageMeter(0.93)
    end = time.time()
    best_loss = float('inf')
    model.train()
    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()
        loss = compute_loss(args, model)
        loss_meter.update(loss.item())
        total_time = count_total_time(model)
        # NFE counted before backward = forward-pass evaluations only.
        nfe_forward = count_nfe(model)
        loss.backward()
        optimizer.step()
        # Backward-pass NFE is the increase over the forward count.
        nfe_total = count_nfe(model)
        nfe_backward = nfe_total - nfe_forward
        nfef_meter.update(nfe_forward)
        nfeb_meter.update(nfe_backward)
        time_meter.update(time.time() - end)
        tt_meter.update(total_time)
        log_message = (
            'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
            ' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
                itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
                nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
            )
        )
        logger.info(log_message)
        # Periodic evaluation; checkpoint whenever the test loss improves.
        if itr % args.val_freq == 0 or itr == args.niters:
            with torch.no_grad():
                model.eval()
                test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
                test_nfe = count_nfe(model)
                log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
                logger.info(log_message)
                if test_loss.item() < best_loss:
                    best_loss = test_loss.item()
                    utils.makedirs(args.save)
                    torch.save({
                        'args': args,
                        'state_dict': model.state_dict(),
                    }, os.path.join(args.save, 'checkpt.pth'))
                model.train()
        # Periodic visualization: overlay true vs. model density on [-10, 10].
        if itr % args.viz_freq == 0:
            with torch.no_grad():
                model.eval()
                xx = torch.linspace(-10, 10, 10000).view(-1, 1)
                true_p = data_density(xx)
                plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='True')
                true_p = model_density(xx, model)
                plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='Model')
                utils.makedirs(os.path.join(args.save, 'figs'))
                plt.savefig(os.path.join(args.save, 'figs', '{:06d}.jpg'.format(itr)))
                plt.close()
                model.train()
        end = time.time()
    logger.info('Training has finished.')
def evaluate():
    """Measure how ODE-solver tolerance affects density normalization.

    Loads the trained checkpoint, then for a range of atol/rtol values numerically
    integrates the model density over [-15, 15] with 500k grid points and records
    |1 - integral|. Saves the error-vs-tolerance curve to ode_solver_error_vs_tol.pdf.
    """
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)
    checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
    model.load_state_dict(checkpt['state_dict'])
    model.to(device)
    tols = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]
    errors = []
    with torch.no_grad():
        for tol in tols:
            # Same tolerance for relative and absolute error; re-apply to the solver.
            args.rtol = tol
            args.atol = tol
            set_cnf_options(args, model)
            xx = torch.linspace(-15, 15, 500000).view(-1, 1).to(device)
            prob_xx = model_density(xx, model).double().view(-1).cpu()
            xx = xx.double().cpu().view(-1)
            # Riemann sum in log space for numerical stability:
            # log(sum_i p(x_i) * dx_i) = logsumexp(log p(x_i) + log dx_i).
            dxx = torch.log(xx[1:] - xx[:-1])
            num_integral = torch.logsumexp(prob_xx[:-1] + dxx, 0).exp()
            errors.append(float(torch.abs(num_integral - 1.)))
            print(errors[-1])
    plt.figure(figsize=(5, 3))
    plt.plot(tols, errors, linewidth=3, marker='o', markersize=7)
    # plt.plot([-1, 0.2], [-1, 0.2], '--', color='grey', linewidth=1)
    plt.xscale("log", nonposx='clip')
    # plt.yscale("log", nonposy='clip')
    plt.xlabel('Solver Tolerance', fontsize=17)
    plt.ylabel('$| 1 - \int p(x) |$', fontsize=17)
    plt.tight_layout()
    plt.savefig('ode_solver_error_vs_tol.pdf')
def visualize_times():
    """Plot the model density at each intermediate integration time.

    Loads the checkpoint, integrates each CNF in the chain from 0 to t for an
    evenly spaced grid of times, saves one density plot per time under
    ``args.save/test_times/figs``, and stitches them into a video with ffmpeg.
    """
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)
    checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
    model.load_state_dict(checkpt['state_dict'])
    model.to(device)
    viz_times = torch.linspace(0., args.time_length , args.ntimes)
    errors = []
    with torch.no_grad():
        # Skip t=0 (identity transform).
        for i,t in enumerate(tqdm(viz_times[1:])):
            model.eval()
            set_cnf_options(args, model)
            xx = torch.linspace(-10, 10, 10000).view(-1, 1)
            #generated_p = model_density(xx, model)
            generated_p=0
            for cnf in model.chain:
                xx = xx.to(device)
                # Partial integration up to time t, with change-of-variables correction.
                z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
                generated_p = standard_normal_logprob(z) - delta_logp
            plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
            utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
            plt.savefig(os.path.join(args.save,'test_times', 'figs', '{:04d}.jpg'.format(i)))
            plt.close()
    trajectory_to_video(os.path.join(args.save,'test_times', 'figs'))
def visualize_evolution():
    """Render the model density evolution over integration time as a 2-D heat map.

    For each intermediate time t, evaluates the density at ``args.num_particles``
    grid points and stacks the results into a (particles x times) array, then saves
    a pcolormesh image to ``args.save/test_times/figs/evolution.jpg``.
    """
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)
    checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
    model.load_state_dict(checkpt['state_dict'])
    model.to(device)
    viz_times = torch.linspace(0., args.time_length , args.ntimes)
    errors = []
    viz_times_np = viz_times[1:].detach().cpu().numpy()
    xx = torch.linspace(-5, 5, args.num_particles).view(-1, 1)
    xx_np = xx.detach().cpu().numpy()
    # Mesh of (position, time) coordinates for the pcolormesh axes below.
    xs,ys = np.meshgrid(xx,viz_times_np)
    #xx,yy = np.meshgrid(args.num_particles, viz_times_np )
    #all_evolutions = np.zeros((args.ntimes-1,args.num_particles))
    all_evolutions = np.zeros((args.num_particles,args.ntimes-1))
    with torch.no_grad():
        for i,t in enumerate(tqdm(viz_times[1:])):
            model.eval()
            set_cnf_options(args, model)
            #xx = torch.linspace(-5, 5, args.num_particles).view(-1, 1)
            #generated_p = model_density(xx, model)
            generated_p=0
            for cnf in model.chain:
                xx = xx.to(device)
                # Partial integration up to time t with log-det correction.
                z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
                generated_p = standard_normal_logprob(z) - delta_logp
            generated_p = generated_p.detach()
            #plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
            cur_evolution=generated_p.view(-1).exp().cpu().numpy()
            #all_evolutions[i]= np.array(cur_evolution)
            # Column i holds the density snapshot at time index i.
            all_evolutions[:,i]= np.array(cur_evolution)
    #xx = np.array(xx.detach().cpu().numpy())
    #yy = np.array(yy)
    plt.figure(dpi=1200)
    plt.clf()
    all_evolutions = all_evolutions.astype('float32')
    print(xs.shape)
    print(ys.shape)
    print(all_evolutions.shape)
    #plt.pcolormesh(ys, xs, all_evolutions)
    # NOTE(review): '.format(i)' on a literal without placeholders is a no-op.
    plt.pcolormesh(xs, ys, all_evolutions.transpose())
    utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
    plt.savefig(os.path.join(args.save,'test_times', 'figs', 'evolution.jpg'.format(i)))
    plt.close()
def visualize_particle_flow():
    """Trace latent trajectories of a grid of particles through the CNF over time.

    Integrates ``args.num_particles`` evenly spaced starting points from 0 to each
    intermediate time t, collects the mapped positions, and plots one trajectory
    line per particle to ``args.save/test_times/figs/particle_trajectory.jpg``.
    """
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)
    checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
    model.load_state_dict(checkpt['state_dict'])
    model.to(device)
    viz_times = torch.linspace(0., args.time_length , args.ntimes)
    errors = []
    xx = torch.linspace(-5, 5, args.num_particles).view(-1, 1)
    zs=[]
    #zs.append(xx.view(-1).cpu().numpy())
    with torch.no_grad():
        for i,t in enumerate(tqdm(viz_times[1:])):
            model.eval()
            set_cnf_options(args, model)
            #generated_p = model_density(xx, model)
            generated_p=0
            for cnf in model.chain:
                xx = xx.to(device)
                # Map the particle positions forward up to time t.
                z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
                generated_p = standard_normal_logprob(z) - delta_logp
            zs.append(z.cpu().numpy())
            #plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
            #plt.savefig(os.path.join(args.save,'test_times', 'figs', '{:04d}.jpg'.format(i)))
            #plt.close()
    # Rows = time steps, columns = particles; plotting gives one line per particle.
    zs=np.array(zs).reshape(args.ntimes-1,args.num_particles)
    viz_t = viz_times[1:].numpy()
    #print(zs)
    plt.figure(dpi=1200)
    plt.clf()
    #plt.plot(viz_t , zs[:,0])
    with sns.color_palette("Blues_d"):
        plt.plot(viz_t , zs)
    plt.xlabel("Test Time")
    #plt.tight_layout()
    utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
    # NOTE(review): '.format(i)' on a literal without placeholders is a no-op.
    plt.savefig(os.path.join(args.save,'test_times', 'figs', 'particle_trajectory.jpg'.format(i)))
    plt.close()
def trajectory_to_video(savedir):
    """Combine the numbered %04d.jpg frames in savedir into savedir/traj.mp4 via ffmpeg."""
    import subprocess
    command = 'ffmpeg -y -i {} {}'.format(os.path.join(savedir, '%04d.jpg'), os.path.join(savedir, 'traj.mp4'))
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    proc.communicate()
if __name__ == '__main__':
    # Entry point: exactly one stage is enabled at a time; the others are left
    # commented out so a reviewer can toggle between training, evaluation, and
    # the different visualizations without CLI changes.
    #train()
    #evaluate()
    #visualize_times()
    visualize_particle_flow()
    #visualize_evolution()
# --- file: ffjord/diagnostics/plot_bottleneck_losses.py ---
import re
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.signal
import scipy.ndimage
# BASE = "experiments/cnf_mnist_64-64-128-128-64-64/logs"
# RESIDUAL = "experiments/cnf_mnist_64-64-128-128-64-64_residual/logs"
# RADEMACHER = "experiments/cnf_mnist_64-64-128-128-64-64_rademacher/logs"
BOTTLENECK = "experiments/cnf_mnist_bottleneck_64-64-128-5-128-64-64/logs"
BOTTLENECK_EST = "experiments/cnf_mnist_bottleneck_64-64-128-5-128-64-64_ae-est/logs"
RAD_BOTTLENECK = "experiments/cnf_mnist_bottleneck_64-64-128-5-128-64-64_rademacher/logs"
RAD_BOTTLENECK_EST = "experiments/cnf_mnist_bottleneck_64-64-128-5-128-64-64_ae-est_rademacher/logs"
# ET_ALL = "experiments/cnf_mnist_bottleneck_64-64-128-5-128-64-64_ae-est_residual_rademacher/logs"
def get_losses(filename):
    """Extract the running-average bits/dim values from a training log.

    Each matching log line contains a segment like ``Bit/dim 1.23(1.20)``; the
    parenthesized running average is parsed and returned as a list of floats,
    in file order. Lines without such a segment are skipped.
    """
    with open(filename, "r") as log_file:
        content = log_file.readlines()
    values = []
    for line in content:
        segment = re.findall(r"Bit/dim [^|(]*\([0-9\.]*\)", line)
        if segment:
            segment = re.findall(r"\([0-9\.]*\)", segment[0])
        if segment:
            segment = re.findall(r"[0-9\.]+", segment[0])
        if segment:
            values.append(float(segment[0]))
    return values
# Script body: parse the four experiment logs, median-filter the loss curves,
# and plot bits/dim vs. epoch comparing Gaussian/Rademacher estimators with and
# without the bottleneck trick. Output: bottleneck_losses.pdf.
bottleneck_loss = get_losses(BOTTLENECK)
bottleneck_est_loss = get_losses(BOTTLENECK_EST)
rademacher_bottleneck_loss = get_losses(RAD_BOTTLENECK)
rademacher_bottleneck_est_loss = get_losses(RAD_BOTTLENECK_EST)
# Median filter (window 21) smooths out per-iteration noise in the curves.
bottleneck_loss = scipy.signal.medfilt(bottleneck_loss, 21)
bottleneck_est_loss = scipy.signal.medfilt(bottleneck_est_loss, 21)
rademacher_bottleneck_loss = scipy.signal.medfilt(rademacher_bottleneck_loss, 21)
rademacher_bottleneck_est_loss = scipy.signal.medfilt(rademacher_bottleneck_est_loss, 21)
import seaborn as sns
sns.set_style("whitegrid")
colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple"]
sns.palplot(sns.xkcd_palette(colors))
import brewer2mpl
line_colors = brewer2mpl.get_map('Set2', 'qualitative', 4).mpl_colors
dark_colors = brewer2mpl.get_map('Dark2', 'qualitative', 4).mpl_colors
# plt.style.use('ggplot')
plt.figure(figsize=(4, 3))
# x-axis converts iteration index to epochs (30 logged iterations per epoch).
plt.plot(np.arange(len(bottleneck_loss)) / 30, bottleneck_loss, ':', color=line_colors[1], label="Gaussian w/o Trick")
plt.plot(np.arange(len(bottleneck_est_loss)) / 30, bottleneck_est_loss, color=dark_colors[1], label="Gaussian w/ Trick")
plt.plot(np.arange(len(rademacher_bottleneck_loss)) / 30, rademacher_bottleneck_loss, ':', color=line_colors[2], label="Rademacher w/o Trick")
plt.plot(np.arange(len(rademacher_bottleneck_est_loss)) / 30, rademacher_bottleneck_est_loss, color=dark_colors[2], label="Rademacher w/ Trick")
plt.legend(frameon=True, fontsize=10.5, loc='upper right')
plt.ylim([1.1, 1.7])
# plt.yscale("log", nonposy='clip')
plt.xlabel("Epoch", fontsize=18)
plt.ylabel("Bits/dim", fontsize=18)
plt.xlim([0, 170])
plt.tight_layout()
plt.savefig('bottleneck_losses.pdf')
# --- file: ffjord/diagnostics/plot_flows.py ---
from inspect import getsourcefile
import sys
import os
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import os
import torch
import lib.toy_data as toy_data
import lib.utils as utils
import lib.visualize_flow as viz_flow
import lib.layers.odefunc as odefunc
import lib.layers as layers
from train_misc import standard_normal_logprob
from train_misc import build_model_tabular, count_parameters
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
'--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'],
type=str, default='pinwheel'
)
parser.add_argument('--discrete', action='store_true')
parser.add_argument('--depth', help='number of coupling layers', type=int, default=10)
parser.add_argument('--glow', type=eval, choices=[True, False], default=False)
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=2500)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
parser.add_argument('--checkpt', type=str, required=True)
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def construct_discrete_model():
    """Build a discrete 2-D normalizing flow: args.depth coupling layers, each
    optionally preceded by a BruteForceLayer when --glow is set; coupling layers
    alternate which half of the input they transform."""
    modules = []
    for depth_idx in range(args.depth):
        if args.glow:
            modules.append(layers.BruteForceLayer(2))
        modules.append(layers.CouplingLayer(2, swap=depth_idx % 2 == 0))
    return layers.SequentialFlow(modules)
def get_transforms(model):
    """Return (sample_fn, density_fn) closures over a flow model.

    ``sample_fn`` runs the model in reverse (latent -> data); ``density_fn`` runs
    it forward (data -> latent). Each optionally threads a log-density accumulator
    through when one is provided.
    """

    def sample_fn(z, logpz=None):
        return model(z, reverse=True) if logpz is None else model(z, logpz, reverse=True)

    def density_fn(x, logpx=None):
        return model(x, reverse=False) if logpx is None else model(x, logpx, reverse=False)

    return sample_fn, density_fn
if __name__ == '__main__':
    # Load either a discrete (coupling-layer) flow or a CNF from --checkpt.
    if args.discrete:
        model = construct_discrete_model().to(device)
        model.load_state_dict(torch.load(args.checkpt)['state_dict'])
    else:
        model = build_model_tabular(args, 2).to(device)
        sd = torch.load(args.checkpt)['state_dict']
        fixed_sd = {}
        # Older checkpoints nest the ODE function one level deeper; rename keys
        # so they match the current module hierarchy.
        for k, v in sd.items():
            fixed_sd[k.replace('odefunc.odefunc', 'odefunc')] = v
        model.load_state_dict(fixed_sd)
    print(model)
    print("Number of trainable parameters: {}".format(count_parameters(model)))
    model.eval()
    p_samples = toy_data.inf_train_gen(args.data, batch_size=800**2)
    with torch.no_grad():
        sample_fn, density_fn = get_transforms(model)
        # Figure 1: samples from the true data distribution.
        plt.figure(figsize=(10, 10))
        ax = ax = plt.gca()
        viz_flow.plt_samples(p_samples, ax, npts=800)
        plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
        fig_filename = os.path.join(args.save, 'figs', 'true_samples.jpg')
        utils.makedirs(os.path.dirname(fig_filename))
        plt.savefig(fig_filename)
        plt.close()
        # Figure 2: model density obtained via the forward (density) transform.
        plt.figure(figsize=(10, 10))
        ax = ax = plt.gca()
        viz_flow.plt_flow_density(standard_normal_logprob, density_fn, ax, npts=800, memory=200, device=device)
        plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
        fig_filename = os.path.join(args.save, 'figs', 'model_density.jpg')
        utils.makedirs(os.path.dirname(fig_filename))
        plt.savefig(fig_filename)
        plt.close()
        # Figure 3: model samples obtained via the reverse (sampling) transform.
        plt.figure(figsize=(10, 10))
        ax = ax = plt.gca()
        viz_flow.plt_flow_samples(torch.randn, sample_fn, ax, npts=800, memory=200, device=device)
        plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
        fig_filename = os.path.join(args.save, 'figs', 'model_samples.jpg')
        utils.makedirs(os.path.dirname(fig_filename))
        plt.savefig(fig_filename)
        plt.close()
# --- file: ffjord/diagnostics/viz_high_fidelity_toy.py ---
import os
import math
from tqdm import tqdm
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
def standard_normal_logprob(z):
    """Elementwise log-density of z under a standard normal N(0, 1)."""
    log_partition = -0.5 * math.log(2 * math.pi)
    return log_partition - z.pow(2) / 2
def makedirs(dirname):
    """Create dirname (and any missing parents) if it does not already exist.

    Uses exist_ok=True instead of an exists-then-create guard, which avoids the
    race where another process creates the directory between the check and the
    os.makedirs call.
    """
    os.makedirs(dirname, exist_ok=True)
def save_density_traj(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
    """Save one high-resolution density image per intermediate integration time.

    Evaluates the model density on an 800x800 grid over [-4, 4]^2 at ``ntimes``
    evenly spaced times, chunking the grid by the ``memory`` fraction to bound
    GPU memory, then writes one greyscale pcolormesh per time to
    ``savedir/viz-<t>.jpg``. ``data_samples`` is currently unused.
    """
    model.eval()
    # sample from a grid
    npts = 800
    side = np.linspace(-4, 4, npts)
    xx, yy = np.meshgrid(side, side)
    xx = torch.from_numpy(xx).type(torch.float32).to(device)
    yy = torch.from_numpy(yy).type(torch.float32).to(device)
    z_grid = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1)
    with torch.no_grad():
        # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
        logpz_grid = torch.sum(standard_normal_logprob(z_grid), 1, keepdim=True)
        for cnf in model.chain:
            end_time = cnf.sqrt_end_time * cnf.sqrt_end_time
            viz_times = torch.linspace(0., end_time, ntimes)
            # First entry is the base density at t=0; subsequent entries are appended per time.
            logpz_grid = [standard_normal_logprob(z_grid).sum(1, keepdim=True)]
            for t in tqdm(viz_times[1:]):
                inds = torch.arange(0, z_grid.shape[0]).to(torch.int64)
                logpz_t = []
                # Process the grid in chunks of (memory * total points) to limit peak memory.
                for ii in torch.split(inds, int(z_grid.shape[0] * memory)):
                    z0, delta_logp = cnf(
                        z_grid[ii],
                        torch.zeros(z_grid[ii].shape[0], 1).to(z_grid), integration_times=torch.tensor([0.,
                                                                                                       t.item()])
                    )
                    logpz_t.append(standard_normal_logprob(z0).sum(1, keepdim=True) - delta_logp)
                logpz_grid.append(torch.cat(logpz_t, 0))
            logpz_grid = torch.stack(logpz_grid, 0).cpu().detach().numpy()
            z_grid = z_grid.cpu().detach().numpy()
    plt.figure(figsize=(8, 8))
    for t in range(logpz_grid.shape[0]):
        plt.clf()
        ax = plt.gca()
        # plot the density
        z, logqz = z_grid, logpz_grid[t]
        xx = z[:, 0].reshape(npts, npts)
        yy = z[:, 1].reshape(npts, npts)
        qz = np.exp(logqz).reshape(npts, npts)
        plt.pcolormesh(xx, yy, qz, cmap='binary')
        ax.set_xlim(-4, 4)
        ax.set_ylim(-4, 4)
        cmap = matplotlib.cm.get_cmap('binary')
        #ax.set_axis_bgcolor(cmap(0.))
        ax.invert_yaxis()
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        plt.tight_layout()
        makedirs(savedir)
        plt.savefig(os.path.join(savedir, f"viz-{t:05d}.jpg"))
def trajectory_to_video(savedir):
    """Stitch the saved viz-%05d.jpg frames in savedir into savedir/traj.mp4 via ffmpeg.

    Passes the command as an argument list instead of splitting a formatted string
    on whitespace, so paths containing spaces no longer break the invocation.
    """
    import subprocess
    frame_pattern = os.path.join(savedir, 'viz-%05d.jpg')
    out_path = os.path.join(savedir, 'traj.mp4')
    # shell=False (the default for a list) avoids any shell-injection surface.
    subprocess.run(['ffmpeg', '-y', '-i', frame_pattern, out_path], stdout=subprocess.PIPE)
if __name__ == '__main__':
    import argparse
    import sys
    # Make the ffjord package root importable when run from diagnostics/.
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')))
    import lib.toy_data as toy_data
    from train_misc import count_parameters
    from train_misc import set_cnf_options, add_spectral_norm, create_regularization_fns
    from train_misc import build_model_tabular
    def get_ckpt_model_and_data(args):
        """Restore the 2-D CNF from args.checkpt and draw toy-data samples
        using the args stored inside the checkpoint."""
        # Load checkpoint.
        checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
        ckpt_args = checkpt['args']
        state_dict = checkpt['state_dict']
        # Construct model and restore checkpoint.
        regularization_fns, regularization_coeffs = create_regularization_fns(ckpt_args)
        model = build_model_tabular(ckpt_args, 2, regularization_fns).to(device)
        if ckpt_args.spectral_norm: add_spectral_norm(model)
        set_cnf_options(ckpt_args, model)
        model.load_state_dict(state_dict)
        model.to(device)
        print(model)
        print("Number of trainable parameters: {}".format(count_parameters(model)))
        # Load samples from dataset
        data_samples = toy_data.inf_train_gen(ckpt_args.data, batch_size=2000)
        return model, data_samples
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpt', type=str, required=True)
    parser.add_argument('--ntimes', type=int, default=101)
    parser.add_argument('--memory', type=float, default=0.01, help='Higher this number, the more memory is consumed.')
    parser.add_argument('--save', type=str, default='trajectory')
    args = parser.parse_args()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model, data_samples = get_ckpt_model_and_data(args)
    save_density_traj(model, data_samples, args.save, ntimes=args.ntimes, memory=args.memory, device=device)
    trajectory_to_video(args.save)
# --- file: ffjord/diagnostics/approx_error_1d.py ---
from inspect import getsourcefile
import sys
import os
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import argparse
import os
import time
import torch
import torch.optim as optim
import lib.utils as utils
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import build_model_tabular
import seaborn as sns
sns.set_style("whitegrid")
colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple"]
sns.palplot(sns.xkcd_palette(colors))
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--ntimes', type=int, default=101)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--save', type=str, default='experiments/approx_error_1d')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def normal_log_density(x, mean=0, stdev=1):
    """Elementwise log-density of a univariate Gaussian N(mean, stdev^2) at x.

    Works on scalars, numpy arrays, or torch tensors (anything supporting
    elementwise arithmetic with numpy scalars).
    """
    z = (x - mean) / stdev
    # log normalization constant: log(2*pi*stdev^2) = log(2*pi) + 2*log(stdev)
    log_norm_const = np.log(2 * np.pi) + 2 * np.log(stdev)
    return -0.5 * (log_norm_const + z * z)
#def data_sample(batch_size):
# x1 = np.random.randn(batch_size) * np.sqrt(0.4) - 2.8
# x2 = np.random.randn(batch_size) * np.sqrt(0.4) - 0.9
# x3 = np.random.randn(batch_size) * np.sqrt(0.4) + 2.
# xs = np.concatenate([x1[:, None], x2[:, None], x3[:, None]], 1)
# k = np.random.randint(0, 3, batch_size)
# x = xs[np.arange(batch_size), k]
# return torch.tensor(x[:, None]).float().to(device)
#
#
#def data_density(x):
# p1 = normal_log_density(x, mean=-2.8, stdev=np.sqrt(0.4))
# p2 = normal_log_density(x, mean=-0.9, stdev=np.sqrt(0.4))
# p3 = normal_log_density(x, mean=2.0, stdev=np.sqrt(0.4))
# return torch.log(p1.exp() / 3 + p2.exp() / 3 + p3.exp() / 3)
def data_sample(batch_size):
    # Draw `batch_size` samples from a 1D equal-weight mixture of five Gaussians
    # (variance 0.4; means -2.8, -0.9, 2, 5.1, 3.4) and return them as a
    # (batch_size, 1) float32 tensor on the module-level `device`.
    x1 = np.random.randn(batch_size) * np.sqrt(0.4) -2.8
    x2 = np.random.randn(batch_size) * np.sqrt(0.4) - 0.9
    x3 = np.random.randn(batch_size) * np.sqrt(0.4) + 2
    x4 = np.random.randn(batch_size) * np.sqrt(0.4) + 5.1
    x5 = np.random.randn(batch_size) * np.sqrt(0.4) + 3.4
    # Sample every component for every row, then pick one component per row
    # uniformly at random via the index array k.
    xs = np.concatenate([x1[:, None], x2[:, None], x3[:, None], x4[:, None], x5[:, None] ], 1)
    k = np.random.randint(0, 5, batch_size)
    x = xs[np.arange(batch_size), k]
    return torch.tensor(x[:, None]).float().to(device)
def data_density(x):
    """Exact log-density of the five-component training mixture, evaluated at x.

    Must stay consistent with `data_sample`: equal weights (1/5), variance 0.4,
    means -2.8, -0.9, 2, 5.1, 3.4. Expects/returns a torch tensor.
    """
    component_means = (-2.8, -0.9, 2, 5.1, 3.4)
    # Accumulate p(x) = sum_i exp(log N(x; m_i, 0.4)) / 5 in the same left-to-right
    # order as the original sum, so floating-point results are identical.
    mixture_prob = 0
    for m in component_means:
        mixture_prob = mixture_prob + normal_log_density(x, mean=m, stdev=np.sqrt(0.4)).exp() / 5
    return torch.log(mixture_prob)
def model_density(x, model):
    """Model log-density log p(x): push x through the flow to base z and apply
    the change-of-variables correction delta_logp accumulated by the CNF."""
    x = x.to(device)
    z, delta_logp = model(x, torch.zeros_like(x))
    logpx = standard_normal_logprob(z) - delta_logp
    return logpx
def model_sample(model, batch_size):
    """Sample from the model: draw base z ~ N(0, 1) and run the flow in reverse.

    Returns (x, logqx): the samples and their log-densities under the model.
    """
    z = torch.randn(batch_size, 1)
    logqz = standard_normal_logprob(z)
    x, logqx = model(z, logqz, reverse=True)
    return x, logqx
def compute_loss(args, model, batch_size=None):
    """Monte-Carlo negative log-likelihood of the model on a fresh data batch."""
    if batch_size is None: batch_size = args.batch_size

    x = data_sample(batch_size)
    logpx = model_density(x, model)
    return -torch.mean(logpx)
def train():
    """Fit a 1D CNF to the mixture density by maximum likelihood.

    Logs running-average timing/NFE stats each iteration, checkpoints the best
    validation model to args.save/checkpt.pth, and periodically saves plots of
    the true vs. learned density.
    """
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)

    logger.info(model)
    logger.info("Number of trainable parameters: {}".format(count_parameters(model)))

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Exponential running averages (decay 0.93) used only for logging.
    time_meter = utils.RunningAverageMeter(0.93)
    loss_meter = utils.RunningAverageMeter(0.93)
    nfef_meter = utils.RunningAverageMeter(0.93)
    nfeb_meter = utils.RunningAverageMeter(0.93)
    tt_meter = utils.RunningAverageMeter(0.93)

    end = time.time()
    best_loss = float('inf')
    model.train()
    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()

        loss = compute_loss(args, model)
        loss_meter.update(loss.item())

        total_time = count_total_time(model)
        # NFE counters are cumulative; snapshot before backward to split
        # forward vs. backward function evaluations.
        nfe_forward = count_nfe(model)

        loss.backward()
        optimizer.step()

        nfe_total = count_nfe(model)
        nfe_backward = nfe_total - nfe_forward
        nfef_meter.update(nfe_forward)
        nfeb_meter.update(nfe_backward)

        time_meter.update(time.time() - end)
        tt_meter.update(total_time)

        log_message = (
            'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
            ' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
                itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
                nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
            )
        )
        logger.info(log_message)

        if itr % args.val_freq == 0 or itr == args.niters:
            with torch.no_grad():
                model.eval()
                test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
                test_nfe = count_nfe(model)
                log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
                logger.info(log_message)

                # Keep only the best-on-validation checkpoint.
                if test_loss.item() < best_loss:
                    best_loss = test_loss.item()
                    utils.makedirs(args.save)
                    torch.save({
                        'args': args,
                        'state_dict': model.state_dict(),
                    }, os.path.join(args.save, 'checkpt.pth'))
                model.train()

        if itr % args.viz_freq == 0:
            with torch.no_grad():
                model.eval()

                xx = torch.linspace(-10, 10, 10000).view(-1, 1)
                true_p = data_density(xx)
                plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='True')

                # NOTE(review): `true_p` is reused here for the model curve.
                true_p = model_density(xx, model)
                plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='Model')

                utils.makedirs(os.path.join(args.save, 'figs'))
                plt.savefig(os.path.join(args.save, 'figs', '{:06d}.jpg'.format(itr)))
                plt.close()
                model.train()

        end = time.time()

    logger.info('Training has finished.')
def evaluate():
    """Measure how ODE-solver tolerance affects the learned density's normalization.

    Restores the best checkpoint, then for a range of solver tolerances computes a
    numerical (log-space Riemann) integral of p(x) over [-15, 15] and records
    |1 - integral| — zero for a perfectly normalized density and an exact solver.
    Saves a summary plot to 'ode_solver_error_vs_tol.pdf'.
    """
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)

    checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
    model.load_state_dict(checkpt['state_dict'])
    model.to(device)

    tols = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]
    errors = []
    with torch.no_grad():
        for tol in tols:
            # Re-apply the solver options after mutating the tolerances.
            args.rtol = tol
            args.atol = tol
            set_cnf_options(args, model)

            xx = torch.linspace(-15, 15, 500000).view(-1, 1).to(device)
            prob_xx = model_density(xx, model).double().view(-1).cpu()
            xx = xx.double().cpu().view(-1)
            # Riemann sum in log space: logsumexp(log p(x_i) + log dx_i).
            dxx = torch.log(xx[1:] - xx[:-1])
            num_integral = torch.logsumexp(prob_xx[:-1] + dxx, 0).exp()
            errors.append(float(torch.abs(num_integral - 1.)))
            print(errors[-1])

    plt.figure(figsize=(5, 3))
    plt.plot(tols, errors, linewidth=3, marker='o', markersize=7)
    plt.xscale("log", nonposx='clip')
    plt.xlabel('Solver Tolerance', fontsize=17)
    # Raw string: '\i' is an invalid escape sequence in a normal literal
    # (DeprecationWarning today, SyntaxError in future Python versions).
    plt.ylabel(r'$| 1 - \int p(x) |$', fontsize=17)
    plt.tight_layout()
    plt.savefig('ode_solver_error_vs_tol.pdf')
def visualize_times():
    """Plot the model density at intermediate flow times t in (0, time_length],
    saving one frame per time and stitching them into a video with ffmpeg."""
    model = build_model_tabular(args, 1).to(device)
    set_cnf_options(args, model)

    checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
    model.load_state_dict(checkpt['state_dict'])
    model.to(device)

    viz_times = torch.linspace(0., args.time_length , args.ntimes)
    errors = []  # NOTE(review): never used in this function.
    with torch.no_grad():
        # Skip t=0 (identity transform).
        for i,t in enumerate(tqdm(viz_times[1:])):
            model.eval()
            set_cnf_options(args, model)

            xx = torch.linspace(-10, 10, 10000).view(-1, 1)
            #generated_p = model_density(xx, model)
            generated_p=0
            # Integrate each CNF block only up to time t to obtain the density
            # part-way along the flow; the last block's output is what gets plotted.
            for cnf in model.chain:
                xx = xx.to(device)
                z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
                generated_p = standard_normal_logprob(z) - delta_logp

            plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
            utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
            plt.savefig(os.path.join(args.save,'test_times', 'figs', '{:04d}.jpg'.format(i)))
            plt.close()

    trajectory_to_video(os.path.join(args.save,'test_times', 'figs'))
def trajectory_to_video(savedir):
    """Stitch the numbered frames in `savedir` ('%04d.jpg') into savedir/traj.mp4.

    Requires the `ffmpeg` binary on PATH. Output and errors from ffmpeg are
    collected but not inspected (best-effort, as before).
    """
    import subprocess
    # Build argv as a list: the old "cmd string".split() broke on paths
    # containing spaces and is unnecessary when not using a shell.
    cmd = ['ffmpeg', '-y', '-i', os.path.join(savedir, '%04d.jpg'), os.path.join(savedir, 'traj.mp4')]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, error = process.communicate()
if __name__ == '__main__':
    # Full pipeline: fit the 1D CNF, measure solver-tolerance error,
    # then render the density at intermediate flow times.
    train()
    evaluate()
    visualize_times()
| 12,572 | 36.984894 | 116 | py |
steer | steer-master/ffjord/diagnostics/fig_1_1d_toy.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from inspect import getsourcefile
import sys
import argparse
import os
import time
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import torch
import torch.optim as optim
import lib.toy_data as toy_data
import lib.utils as utils
from lib.visualize_flow import visualize_transform
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import add_spectral_norm, spectral_norm_power_iteration
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
from train_misc import build_model_tabular
from viz_toy import save_trajectory, trajectory_to_video
from viz_fig1 import save_fig1,save_fig1_rev,save_fig1_1d_ptd,save_fig1_1d_ptd_timescrub,save_fig1_1d_icml, save_fig1_1d_icml_rev,save_fig1_1d_icml_no_top_or_bottom
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
'--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals','rowimg','rowimgsmol','willrow','1d_density','1d_density_mix'], type=str,
default='1d_density_mix'
)
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=2500)
parser.add_argument('--batch_size', type=int, default=8000)
parser.add_argument('--test_batch_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
#parser.add_argument("--resume", type=str, default=None)
#parser.add_argument('--save', type=str, default='experiments/fig1_1d_toy')
parser.add_argument('--save', type=str, default='experiments/approx_error_1d')
parser.add_argument('--resume', type=str, default='experiments/approx_error_1d')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--icml_plot', type=int,default=1)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cpu') #torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
print('GPU active:',torch.cuda.is_available())
def get_transforms(model):
    """Wrap `model` into a (sample_fn, density_fn) pair.

    sample_fn maps base-distribution points to data space (reverse=True);
    density_fn maps data points back to the base distribution (reverse=False).
    Each optionally threads a log-probability tensor through the flow.
    """
    def sample_fn(z, logpz=None):
        # Generative direction.
        if logpz is None:
            return model(z, reverse=True)
        return model(z, logpz, reverse=True)

    def density_fn(x, logpx=None):
        # Normalizing direction.
        if logpx is None:
            return model(x, reverse=False)
        return model(x, logpx, reverse=False)

    return sample_fn, density_fn
def compute_loss(args, model, batch_size=None):
    """Negative log-likelihood of a fresh toy-data batch under the CNF,
    via the change-of-variables formula: log p(x) = log p(z) - delta_logp."""
    if batch_size is None: batch_size = args.batch_size

    # load data
    x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
    x = torch.from_numpy(x).type(torch.float32).to(device)
    zero = torch.zeros(x.shape[0], 1).to(x)

    # transform to z
    z, delta_logp = model(x, zero)

    # compute log q(z)
    logpz = standard_normal_logprob(z).sum(1, keepdim=True)

    logpx = logpz - delta_logp
    loss = -torch.mean(logpx)
    return loss
if __name__ == '__main__':
    # Quick visual sanity check of the 1D data sampler before anything else.
    x= toy_data.inf_train_gen(args.data, batch_size=args.batch_size)
    print(x.shape)
    plt.hist(x,bins=500)
    plt.savefig('testwill.png')
    regularization_fns, regularization_coeffs = create_regularization_fns(args)
    # model = build_model_tabular(args, 200, regularization_fns).to(device)
    model = build_model_tabular(args, 1, regularization_fns).to(device)
    if args.spectral_norm: add_spectral_norm(model)
    set_cnf_options(args, model)

    # logger.info(model)
    logger.info("Number of trainable parameters: {}".format(count_parameters(model)))

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # restore parameters
    if args.resume is not None:
        #checkpt = torch.load(args.resume+'/checkpt.pth', map_location=lambda storage, loc: storage)
        #model.load_state_dict(checkpt["state_dict"])
        # NOTE(review): resuming rebuilds the model WITHOUT regularization_fns
        # and loads the checkpoint from args.save, not args.resume.
        model = build_model_tabular(args, 1).to(device)
        set_cnf_options(args, model)
        checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
        model.load_state_dict(checkpt['state_dict'])
        model.to(device)

        if "optim_state_dict" in checkpt.keys():
            optimizer.load_state_dict(checkpt["optim_state_dict"])
            # Manually move optimizer state to device.
            for state in optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        # NOTE(review): `cvt` is not defined in this script; this
                        # path would raise NameError if tensor state is present.
                        state[k] = cvt(v)

    if args.icml_plot:
        save_fig1_path = os.path.join(args.resume, 'fig1_ani')
        logger.info('Plotting fig1 to {}'.format(save_fig1_path))
        data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
        #save_fig1_1d_icml_no_top_or_bottom(model, data_samples, save_fig1_path, device=device,itr=0)
        save_fig1_1d_icml(model, data_samples, save_fig1_path, device=device,itr=0)
        #save_fig1_1d_icml_rev(model, data_samples, save_fig1_path, device=device,itr=0)
        # Deliberate crash (ZeroDivisionError) to halt the script after plotting.
        1/0

    # Exponential running averages (decay 0.93) used only for logging.
    time_meter = utils.RunningAverageMeter(0.93)
    loss_meter = utils.RunningAverageMeter(0.93)
    nfef_meter = utils.RunningAverageMeter(0.93)
    nfeb_meter = utils.RunningAverageMeter(0.93)
    tt_meter = utils.RunningAverageMeter(0.93)

    end = time.time()
    best_loss = float('inf')
    model.train()
    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()
        if args.spectral_norm: spectral_norm_power_iteration(model, 1)

        loss = compute_loss(args, model)
        loss_meter.update(loss.item())

        if len(regularization_coeffs) > 0:
            # Weighted sum of active regularizers is added to the NLL.
            reg_states = get_regularization(model, regularization_coeffs)
            reg_loss = sum(
                reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
            )
            loss = loss + reg_loss

        total_time = count_total_time(model)
        # Snapshot NFE before backward to split forward vs. backward counts.
        nfe_forward = count_nfe(model)

        loss.backward()
        optimizer.step()

        nfe_total = count_nfe(model)
        nfe_backward = nfe_total - nfe_forward
        nfef_meter.update(nfe_forward)
        nfeb_meter.update(nfe_backward)

        time_meter.update(time.time() - end)
        tt_meter.update(total_time)

        log_message = (
            'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
            ' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
                itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
                nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
            )
        )
        if len(regularization_coeffs) > 0:
            log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)

        logger.info(log_message)

        if itr % args.val_freq == 0 or itr == args.niters:
            with torch.no_grad():
                model.eval()
                test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
                test_nfe = count_nfe(model)
                log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
                logger.info(log_message)

                # Keep only the best-on-validation checkpoint.
                if test_loss.item() < best_loss:
                    best_loss = test_loss.item()
                    utils.makedirs(args.save)
                    torch.save({
                        'args': args,
                        'state_dict': model.state_dict(),
                    }, os.path.join(args.save, 'checkpt.pth'))
                model.train()

            save_fig1_path = os.path.join(args.resume, 'fig1_ani')
            logger.info('Plotting fig1 to {}'.format(save_fig1_path))
            data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
            save_fig1_1d_ptd(model, data_samples, save_fig1_path, device=device,itr=itr)
            # save_fig1_rev(model, data_samples, save_fig1_path, device=device)

        # if itr % args.viz_freq == 0:
        #     with torch.no_grad():
        #         model.eval()
        #         p_samples = toy_data.inf_train_gen(args.data, batch_size=2000)
        #         sample_fn, density_fn = get_transforms(model)
        #         plt.figure(figsize=(9, 3))
        #         visualize_transform(
        #             p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn,
        #             samples=True, npts=100, device=device
        #         )
        #         fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr))
        #         utils.makedirs(os.path.dirname(fig_filename))
        #         plt.savefig(fig_filename)
        #         plt.close()model
        #         model.train()

        end = time.time()

    # Final figures: full density panel plus a time-scrub animation.
    save_fig1_path = os.path.join(args.resume, 'fig1')
    logger.info('Plotting fig1 to {}'.format(save_fig1_path))
    data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
    save_fig1_1d_ptd(model, data_samples, save_fig1_path, device=device)
    save_fig1_1d_ptd_timescrub(model, data_samples, save_fig1_path+'/scrub', device=device)

    logger.info('Training has finished.')

    # save_fig1_path = os.path.join(args.resume, 'fig1')
    # logger.info('Plotting fig1 to {}'.format(save_fig1_path))
    # data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
    # save_fig1(model, data_samples, save_fig1_path, device=device)
    # save_fig1_rev(model, data_samples, save_fig1_path, device=device)
    # save_trajectory(model, data_samples, save_traj_dir, device=device)
    # trajectory_to_video(save_traj_dir)
| 12,538 | 41.795222 | 166 | py |
steer | steer-master/ffjord/diagnostics/plot_compare_multiscale.py | import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
MNIST_SINGLESCALE = "diagnostics/mnist.log"
MNIST_MULTISCALE = "diagnostics/mnist_multiscale.log"
def get_values(filename):
    """Parse a training log file into (losses, nfes).

    For each line, extracts the parenthesized running average from
    "Steps ... (avg)" into nfes and from "Bit/dim ... (avg)" into losses.
    Lines without a match contribute nothing.
    """
    def _running_avg(line, pattern):
        # Match "<label> ... (number)", then peel out the parenthesized
        # running average and convert it to float; None when absent.
        hit = re.findall(pattern, line)
        if hit:
            hit = re.findall(r"\([0-9\.]*\)", hit[0])
        if hit:
            hit = re.findall(r"[0-9\.]+", hit[0])
        return float(hit[0]) if hit else None

    losses, nfes = [], []
    with open(filename, "r") as f:
        for line in f.readlines():
            nfe = _running_avg(line, r"Steps [^|(]*\([0-9\.]*\)")
            if nfe is not None:
                nfes.append(nfe)

            loss = _running_avg(line, r"Bit/dim [^|(]*\([0-9\.]*\)")
            if loss is not None:
                losses.append(loss)

    return losses, nfes
# Parse both training logs and overlay a loss-vs-NFE scatter comparison.
mnist_singlescale_loss, mnist_singlescale_nfes = get_values(MNIST_SINGLESCALE)
mnist_multiscale_loss, mnist_multiscale_nfes = get_values(MNIST_MULTISCALE)

import brewer2mpl
line_colors = brewer2mpl.get_map('Set2', 'qualitative', 4).mpl_colors
dark_colors = brewer2mpl.get_map('Dark2', 'qualitative', 4).mpl_colors  # NOTE(review): unused

import seaborn as sns
sns.set_style("whitegrid")
colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple"]
sns.palplot(sns.xkcd_palette(colors))

plt.figure(figsize=(4, 2.6))
# Thin to every 10th point to keep the scatter legible.
plt.scatter(mnist_singlescale_nfes[::10], mnist_singlescale_loss[::10], color=line_colors[1], label="Single FFJORD")
plt.scatter(mnist_multiscale_nfes[::10], mnist_multiscale_loss[::10], color=line_colors[2], label="Multiscale FFJORD")
plt.ylim([0.9, 1.25])
plt.legend(frameon=True, fontsize=10.5)
plt.xlabel("NFE", fontsize=18)
plt.ylabel("Bits/dim", fontsize=18)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=14)
ax.tick_params(axis='both', which='minor', labelsize=10)
plt.tight_layout()
plt.savefig('multiscale_loss_vs_nfe.pdf')
| 1,868 | 29.639344 | 118 | py |
steer | steer-master/ffjord/diagnostics/viz_multiscale.py | from inspect import getsourcefile
import sys
import os
import math
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import argparse
import lib.layers as layers
import lib.odenvp as odenvp
import torch
import torchvision.transforms as tforms
import torchvision.datasets as dset
from torchvision.utils import save_image
import lib.utils as utils
from train_misc import add_spectral_norm, set_cnf_options, count_parameters
parser = argparse.ArgumentParser("Continuous Normalizing Flow")
parser.add_argument("--checkpt", type=str, required=True)
parser.add_argument("--data", choices=["mnist", "svhn", "cifar10", 'lsun_church'], type=str, default="cifar10")
parser.add_argument("--dims", type=str, default="64,64,64")
parser.add_argument("--num_blocks", type=int, default=2, help='Number of stacked CNFs.')
parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"])
parser.add_argument(
"--nonlinearity", type=str, default="softplus", choices=["tanh", "relu", "softplus", "elu", "swish"]
)
parser.add_argument("--conv", type=eval, default=True, choices=[True, False])
parser.add_argument('--solver', type=str, default='dopri5')
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None)
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument("--imagesize", type=int, default=None)
parser.add_argument("--alpha", type=float, default=-1.0)
parser.add_argument('--time_length', type=float, default=1.0)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--add_noise", type=eval, default=True, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=True, choices=[True, False])
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--ntimes', type=int, default=50)
parser.add_argument('--save', type=str, default='img_trajectory')
args = parser.parse_args()
BATCH_SIZE = 8 * 8
def add_noise(x):
    """Uniform dequantization: [0, 1] -> [0, 255] -> add U(0, 1) noise -> [0, 1].

    No-op when --add_noise is disabled on the command line.
    """
    if not args.add_noise:
        return x
    u = x.new().resize_as_(x).uniform_()
    return (x * 255 + u) / 256
def get_dataset(args):
    """Build the training set and data shape for args.data.

    Returns:
        (train_set, data_shape): a torchvision dataset and the per-sample shape
        (C, H, W), flattened to (C*H*W,) when args.conv is False.

    Raises:
        ValueError: for any dataset name not handled here. Note that 'svhn'
        is accepted by the CLI choices but has no branch in this function;
        previously it fell through and crashed with a NameError on im_dim.
    """
    trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])

    if args.data == "mnist":
        im_dim = 1
        im_size = 28 if args.imagesize is None else args.imagesize
        train_set = dset.MNIST(root="./data", train=True, transform=trans(im_size), download=True)
    elif args.data == "cifar10":
        im_dim = 3
        im_size = 32 if args.imagesize is None else args.imagesize
        train_set = dset.CIFAR10(
            root="./data", train=True, transform=tforms.Compose([
                tforms.Resize(im_size),
                tforms.RandomHorizontalFlip(),
                tforms.ToTensor(),
                add_noise,
            ]), download=True
        )
    elif args.data == 'lsun_church':
        im_dim = 3
        im_size = 64 if args.imagesize is None else args.imagesize
        train_set = dset.LSUN(
            'data', ['church_outdoor_train'], transform=tforms.Compose([
                tforms.Resize(96),
                tforms.RandomCrop(64),
                tforms.Resize(im_size),
                tforms.ToTensor(),
                add_noise,
            ])
        )
    else:
        # Fail loudly instead of the old implicit NameError fall-through.
        raise ValueError("Unsupported dataset: {}".format(args.data))

    data_shape = (im_dim, im_size, im_size)
    if not args.conv:
        data_shape = (im_dim * im_size * im_size,)

    return train_set, data_shape
def create_model(args, data_shape):
    """Construct the multiscale ODENVP flow described by args for inputs of data_shape."""
    # args.dims is a comma-separated width spec, e.g. "64,64,64".
    hidden_dims = tuple(map(int, args.dims.split(",")))

    model = odenvp.ODENVP(
        (BATCH_SIZE, *data_shape),
        n_blocks=args.num_blocks,
        intermediate_dims=hidden_dims,
        nonlinearity=args.nonlinearity,
        alpha=args.alpha,
        cnf_kwargs={"T": args.time_length, "train_T": args.train_T},
    )
    if args.spectral_norm: add_spectral_norm(model)
    set_cnf_options(args, model)
    return model
if __name__ == '__main__':
    # Visualize a multiscale CNF: cosine-interpolate 4 real images in latent
    # space, then dump frames of the flow's transformation at each CNF block.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)

    # load dataset
    train_set, data_shape = get_dataset(args)

    # build model
    model = create_model(args, data_shape)
    print(model)
    print("Number of trainable parameters: {}".format(count_parameters(model)))

    # restore parameters
    checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
    # Strip a doubled 'odefunc.odefunc' key prefix left by an older checkpoint format.
    pruned_sd = {}
    for k, v in checkpt['state_dict'].items():
        pruned_sd[k.replace('odefunc.odefunc', 'odefunc')] = v
    model.load_state_dict(pruned_sd)

    train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=BATCH_SIZE, shuffle=True)
    data_samples, _ = train_loader.__iter__().__next__()

    # cosine interpolate between 4 real images.
    z = data_samples[:4]
    print('Inferring base values for 4 example images.')
    z = model(z)
    # Two interpolation angles over a sqrt(B) x sqrt(B) grid.
    phi0 = torch.linspace(0, 0.5, int(math.sqrt(BATCH_SIZE))) * math.pi
    phi1 = torch.linspace(0, 0.5, int(math.sqrt(BATCH_SIZE))) * math.pi
    phi0, phi1 = torch.meshgrid([phi0, phi1])
    phi0, phi1 = phi0.contiguous().view(-1, 1), phi1.contiguous().view(-1, 1)
    z = torch.cos(phi0) * (torch.cos(phi1) * z[0] + torch.sin(phi1) * z[1]) + \
        torch.sin(phi0) * (torch.cos(phi1) * z[2] + torch.sin(phi1) * z[3])
    print('Reconstructing images from latent interpolation.')
    z = model(z, reverse=True)

    non_cnf_layers = []
    utils.makedirs(args.save)
    img_idx = 0

    def save_imgs_figure(xs):
        # Save a sqrt(B) x sqrt(B) image grid with a sequential file number.
        global img_idx
        save_image(
            list(xs),
            os.path.join(args.save, "img_{:05d}.jpg".format(img_idx)), nrow=int(math.sqrt(BATCH_SIZE)), normalize=True,
            range=(0, 1)
        )
        img_idx += 1

    class FactorOut(torch.nn.Module):
        # Reverse-only module that re-attaches channels factored out at a
        # multiscale level, so intermediate states can be decoded to images.
        def __init__(self, factor_out):
            super(FactorOut, self).__init__()
            self.factor_out = factor_out

        def forward(self, x, reverse=True):
            assert reverse
            # Tile the stored factored-out channels across the time dimension.
            T = x.shape[0] // self.factor_out.shape[0]
            return torch.cat([x, self.factor_out.repeat(T, *([1] * (self.factor_out.ndimension() - 1)))], 1)

    time_ratio = 1.0

    print('Visualizing transformations.')
    with torch.no_grad():
        for idx, stacked_layers in enumerate(model.transforms):
            for layer in stacked_layers.chain:
                print(z.shape)
                print(non_cnf_layers)
                if isinstance(layer, layers.CNF):
                    # linspace over time, and visualize by reversing through previous non_cnf_layers.
                    cnf = layer
                    end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
                    ntimes = int(args.ntimes * time_ratio)
                    integration_times = torch.linspace(0, end_time.item(), ntimes)
                    z_traj = cnf(z, integration_times=integration_times)

                    # reverse z(t) for all times to the input space
                    z_flatten = z_traj.view(ntimes * BATCH_SIZE, *z_traj.shape[2:])
                    for prev_layer in non_cnf_layers[::-1]:
                        z_flatten = prev_layer(z_flatten, reverse=True)
                    z_inv = z_flatten.view(ntimes, BATCH_SIZE, *data_shape)
                    for t in range(1, z_inv.shape[0]):
                        z_t = z_inv[t]
                        save_imgs_figure(z_t)

                    z = z_traj[-1]
                else:
                    # update z and place in non_cnf_layers.
                    z = layer(z)
                    non_cnf_layers.append(layer)
            if idx < len(model.transforms) - 1:
                d = z.shape[1] // 2
                z, factor_out = z[:, :d], z[:, d:]
                non_cnf_layers.append(FactorOut(factor_out))

                # After every factor out, we half the time for visualization.
                time_ratio = time_ratio / 2
| 8,489 | 37.071749 | 119 | py |
steer | steer-master/ffjord/lib/priors.py | import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
eps = 1e-8
class Uniform(nn.Module):
    """Uniform distribution on [a, b] with reparameterized sampling.

    This class previously crashed on construction (``super(Normal, self)`` with a
    Uniform instance raises TypeError) and its methods were a copy-paste of the
    Gaussian code below, referencing attributes (``self.mu``, ``self.logsigma``,
    ``self.normalization``) that were never defined. It now implements the
    uniform distribution it advertises, mirroring ``Normal``'s interface:
    bounds may come from the module's own (a, b) or from a trailing `params`
    dimension of size 2 holding (a, b).
    """

    def __init__(self, a=0, b=1):
        super(Uniform, self).__init__()  # was: super(Normal, self) -> TypeError
        self.a = Variable(torch.Tensor([a]))
        self.b = Variable(torch.Tensor([b]))

    def _check_inputs(self, size, params):
        """Resolve (a, b) bounds from explicit params and/or a target size."""
        if size is None and params is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and params is not None:
            a = params.select(-1, 0).expand(size)
            b = params.select(-1, 1).expand(size)
            return a, b
        elif size is not None:
            a = self.a.expand(size)
            b = self.b.expand(size)
            return a, b
        elif params is not None:
            a = params.select(-1, 0)
            b = params.select(-1, 1)
            return a, b
        else:
            raise ValueError(
                'Given invalid inputs: size={}, params={})'.format(
                    size, params))

    def sample(self, size=None, params=None):
        """Reparameterized sample: a + (b - a) * u with u ~ U(0, 1)."""
        a, b = self._check_inputs(size, params)
        std_u = Variable(torch.rand(a.size()).type_as(a.data))
        return a + (b - a) * std_u

    def log_density(self, sample, params=None):
        """Elementwise log p(sample): -log(b - a) inside [a, b], -inf outside."""
        if params is not None:
            a, b = self._check_inputs(None, params)
        else:
            a, b = self._check_inputs(sample.size(), None)
        a = a.type_as(sample)
        b = b.type_as(sample)
        inside = (sample >= a) & (sample <= b)
        log_p = -torch.log(b - a)
        neg_inf = torch.full_like(sample, float('-inf'))
        return torch.where(inside, log_p.expand_as(sample), neg_inf)

    def get_params(self):
        # was: torch.cat([self.mu, self.logsigma]) on undefined attributes
        return torch.cat([self.a, self.b])

    @property
    def nparams(self):
        return 2

    @property
    def ndim(self):
        return 1

    @property
    def is_reparameterizable(self):
        return True

    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.a.data[0], self.b.data[0])
        return tmpstr
class Normal(nn.Module):
    """Samples from a Normal distribution using the reparameterization trick.

    Parameters may live on the module (``self.mu``, ``self.logsigma``) or be
    supplied per call as a trailing ``params`` tensor whose last dimension
    holds (mu, log sigma).
    """
    def __init__(self, mu=0, sigma=1):
        super(Normal, self).__init__()
        # log(2*pi) term of the Gaussian log-density normalizer.
        self.normalization = Variable(torch.Tensor([np.log(2 * np.pi)]))
        self.mu = Variable(torch.Tensor([mu]))
        self.logsigma = Variable(torch.Tensor([math.log(sigma)]))
    def _check_inputs(self, size, mu_logsigma):
        # Resolve (mu, logsigma) from the per-call tensor, the stored module
        # parameters expanded to `size`, or both; error on ambiguous input.
        if size is None and mu_logsigma is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and mu_logsigma is not None:
            mu = mu_logsigma.select(-1, 0).expand(size)
            logsigma = mu_logsigma.select(-1, 1).expand(size)
            return mu, logsigma
        elif size is not None:
            mu = self.mu.expand(size)
            logsigma = self.logsigma.expand(size)
            return mu, logsigma
        elif mu_logsigma is not None:
            mu = mu_logsigma.select(-1, 0)
            logsigma = mu_logsigma.select(-1, 1)
            return mu, logsigma
        else:
            raise ValueError(
                'Given invalid inputs: size={}, mu_logsigma={})'.format(
                    size, mu_logsigma))
    def sample(self, size=None, params=None):
        """Reparameterized draw: mu + sigma * eps with eps ~ N(0, 1)."""
        mu, logsigma = self._check_inputs(size, params)
        std_z = Variable(torch.randn(mu.size()).type_as(mu.data))
        sample = std_z * torch.exp(logsigma) + mu
        return sample
    def log_density(self, sample, params=None):
        """Elementwise Gaussian log-density of `sample` under (mu, logsigma)."""
        if params is not None:
            mu, logsigma = self._check_inputs(None, params)
        else:
            mu, logsigma = self._check_inputs(sample.size(), None)
            mu = mu.type_as(sample)
            logsigma = logsigma.type_as(sample)
        c = self.normalization.type_as(sample.data)
        inv_sigma = torch.exp(-logsigma)
        tmp = (sample - mu) * inv_sigma
        # -0.5 * [ ((x - mu)/sigma)^2 + 2*log(sigma) + log(2*pi) ]
        return -0.5 * (tmp * tmp + 2 * logsigma + c)
    def NLL(self, params, sample_params=None):
        """Analytically computes
            E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]
        If mu_2, and sigma_2^2 are not provided, defaults to entropy.
        """
        mu, logsigma = self._check_inputs(None, params)
        if sample_params is not None:
            sample_mu, sample_logsigma = self._check_inputs(None, sample_params)
        else:
            sample_mu, sample_logsigma = mu, logsigma
        c = self.normalization.type_as(sample_mu.data)
        nll = logsigma.mul(-2).exp() * (sample_mu - mu).pow(2) \
            + torch.exp(sample_logsigma.mul(2) - logsigma.mul(2)) + 2 * logsigma + c
        return nll.mul(0.5)
    def kld(self, params):
        """Computes KL(q||p) where q is the given distribution and p
        is the standard Normal distribution.
        """
        mu, logsigma = self._check_inputs(None, params)
        # see Appendix B from VAE paper:
        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        # https://arxiv.org/abs/1312.6114
        # 0.5 * sum(1 + log(sigma^2) - mean^2 - sigma^2)
        kld = logsigma.mul(2).add(1) - mu.pow(2) - logsigma.exp().pow(2)
        kld.mul_(-0.5)
        return kld
    def get_params(self):
        # Concatenated module-level parameters (mu, logsigma).
        return torch.cat([self.mu, self.logsigma])
    @property
    def nparams(self):
        # Number of parameters per dimension: (mu, logsigma).
        return 2
    @property
    def ndim(self):
        return 1
    @property
    def is_reparameterizable(self):
        return True
    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.mu.data[0], self.logsigma.exp().data[0])
        return tmpstr
class Laplace(nn.Module):
    """Samples from a Laplace distribution using the reparameterization trick.

    Parameters may live on the module (``self.mu``, ``self.logscale``) or be
    supplied per call as a trailing ``params`` tensor whose last dimension
    holds (mu, log scale).
    """
    def __init__(self, mu=0, scale=1):
        super(Laplace, self).__init__()
        # -log(2) term of the Laplace log-density normalizer.
        self.normalization = Variable(torch.Tensor([-math.log(2)]))
        self.mu = Variable(torch.Tensor([mu]))
        self.logscale = Variable(torch.Tensor([math.log(scale)]))
    def _check_inputs(self, size, mu_logscale):
        # Resolve (mu, logscale) from the per-call tensor, the stored module
        # parameters expanded to `size`, or both; error on ambiguous input.
        if size is None and mu_logscale is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and mu_logscale is not None:
            mu = mu_logscale.select(-1, 0).expand(size)
            logscale = mu_logscale.select(-1, 1).expand(size)
            return mu, logscale
        elif size is not None:
            mu = self.mu.expand(size)
            logscale = self.logscale.expand(size)
            return mu, logscale
        elif mu_logscale is not None:
            mu = mu_logscale.select(-1, 0)
            logscale = mu_logscale.select(-1, 1)
            return mu, logscale
        else:
            raise ValueError(
                'Given invalid inputs: size={}, mu_logscale={})'.format(
                    size, mu_logscale))
    def sample(self, size=None, params=None):
        """Reparameterized draw via inverse-CDF of the Laplace distribution."""
        mu, logscale = self._check_inputs(size, params)
        scale = torch.exp(logscale)
        # Unif(-0.5, 0.5)
        u = Variable(torch.rand(mu.size()).type_as(mu.data)) - 0.5
        # eps guards log(0) when |u| is exactly 0.5.
        sample = mu - scale * torch.sign(u) * torch.log(1 - 2 * torch.abs(u) + eps)
        return sample
    def log_density(self, sample, params=None):
        """Elementwise Laplace log-density: -|x - mu|/scale - log(2*scale)."""
        if params is not None:
            mu, logscale = self._check_inputs(None, params)
        else:
            mu, logscale = self._check_inputs(sample.size(), None)
            mu = mu.type_as(sample)
            logscale = logscale.type_as(sample)
        c = self.normalization.type_as(sample.data)
        inv_scale = torch.exp(-logscale)
        ins_exp = - torch.abs(sample - mu) * inv_scale
        return ins_exp + c - logscale
    def get_params(self):
        # Concatenated module-level parameters (mu, logscale).
        return torch.cat([self.mu, self.logscale])
    @property
    def nparams(self):
        return 2
    @property
    def ndim(self):
        return 1
    @property
    def is_reparameterizable(self):
        return True
    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.mu.data[0], self.logscale.exp().data[0])
        return tmpstr
| 8,414 | 32.392857 | 84 | py |
steer | steer-master/ffjord/lib/spectral_norm.py | """
Spectral Normalization from https://arxiv.org/abs/1802.05957
"""
import types
import torch
from torch.nn.functional import normalize
POWER_ITERATION_FN = "spectral_norm_power_iteration"
class SpectralNorm(object):
    """Forward-pre-hook object that rescales a module weight by its spectral
    norm (largest singular value), estimated with power iteration.

    The hooked module stores `<name>_orig` (the raw parameter) plus buffers
    `<name>`, `<name>_u`, `<name>_v`; this object recomputes `<name>` from
    them before each forward pass.
    """
    def __init__(self, name='weight', dim=0, eps=1e-12):
        self.name = name
        # Dimension treated as the "output" dimension when flattening to 2D.
        self.dim = dim
        self.eps = eps
    def compute_weight(self, module, n_power_iterations):
        """Run `n_power_iterations` of power iteration, then set
        module.<name> = <name>_orig / sigma."""
        if n_power_iterations < 0:
            raise ValueError(
                'Expected n_power_iterations to be non-negative, but '
                'got n_power_iterations={}'.format(n_power_iterations)
            )
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        v = getattr(module, self.name + '_v')
        weight_mat = weight
        if self.dim != 0:
            # permute dim to front
            weight_mat = weight_mat.permute(self.dim, * [d for d in range(weight_mat.dim()) if d != self.dim])
        height = weight_mat.size(0)
        weight_mat = weight_mat.reshape(height, -1)
        with torch.no_grad():
            for _ in range(n_power_iterations):
                # Spectral norm of weight equals to `u^T W v`, where `u` and `v`
                # are the first left and right singular vectors.
                # This power iteration produces approximations of `u` and `v`.
                v = normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
                u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
            setattr(module, self.name + '_u', u)
            setattr(module, self.name + '_v', v)
        sigma = torch.dot(u, torch.matmul(weight_mat, v))
        weight = weight / sigma
        setattr(module, self.name, weight)
    def remove(self, module):
        """Undo `apply`: restore `<name>` as a plain parameter."""
        weight = getattr(module, self.name)
        delattr(module, self.name)
        delattr(module, self.name + '_u')
        delattr(module, self.name + '_orig')
        module.register_parameter(self.name, torch.nn.Parameter(weight))
    def get_update_method(self, module):
        # Returns the bound method installed as POWER_ITERATION_FN so callers
        # can explicitly run extra power iterations.
        def update_fn(module, n_power_iterations):
            self.compute_weight(module, n_power_iterations)
        return update_fn
    def __call__(self, module, unused_inputs):
        # Forward-pre-hook entry point: refresh the normalized weight
        # (0 iterations here; u/v are updated via POWER_ITERATION_FN).
        del unused_inputs
        self.compute_weight(module, n_power_iterations=0)
        # requires_grad might be either True or False during inference.
        if not module.training:
            r_g = getattr(module, self.name + '_orig').requires_grad
            setattr(module, self.name, getattr(module, self.name).detach().requires_grad_(r_g))
    @staticmethod
    def apply(module, name, dim, eps):
        """Install spectral normalization on `module.<name>` and return the hook."""
        fn = SpectralNorm(name, dim, eps)
        weight = module._parameters[name]
        height = weight.size(dim)
        # Random unit vectors to seed power iteration.
        u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
        v = normalize(weight.new_empty(int(weight.numel() / height)).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter(fn.name + "_orig", weight)
        # We still need to assign weight back as fn.name because all sorts of
        # things may assume that it exists, e.g., when initializing weights.
        # However, we can't directly assign as it could be an nn.Parameter and
        # gets added as a parameter. Instead, we register weight.data as a
        # buffer, which will cause weight to be included in the state dict
        # and also supports nn.init due to shared storage.
        module.register_buffer(fn.name, weight.data)
        module.register_buffer(fn.name + "_u", u)
        module.register_buffer(fn.name + "_v", v)
        setattr(module, POWER_ITERATION_FN, types.MethodType(fn.get_update_method(module), module))
        module.register_forward_pre_hook(fn)
        return fn
def inplace_spectral_norm(module, name='weight', dim=None, eps=1e-12):
    r"""Applies spectral normalization to a parameter in the given module.
    .. math::
        \mathbf{W} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})} \\
        \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
    Spectral normalization stabilizes the training of discriminators (critics)
    in Generative Adversarial Networks (GANs) by rescaling the weight tensor
    with spectral norm :math:`\sigma` of the weight matrix calculated using
    power iteration method. If the dimension of the weight tensor is greater
    than 2, it is reshaped to 2D in power iteration method to get spectral
    norm. This is implemented via a hook that calculates spectral norm and
    rescales weight before every :meth:`~Module.forward` call. Extra power
    iterations can be triggered explicitly through the module attribute named
    by ``POWER_ITERATION_FN``.
    See `Spectral Normalization for Generative Adversarial Networks`_ .
    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        dim (int, optional): dimension corresponding to number of outputs,
            the default is 0, except for modules that are instances of
            ConvTranspose1/2/3d, when it is 1
        eps (float, optional): epsilon for numerical stability in
            calculating norms
    Returns:
        The original module with the spectral norm hook
    Example::
        >>> m = spectral_norm(nn.Linear(20, 40))
        Linear (20 -> 40)
        >>> m.weight_u.size()
        torch.Size([20])
    """
    if dim is None:
        # ConvTranspose layers keep output channels in dim 1, not dim 0.
        if isinstance(module, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)):
            dim = 1
        else:
            dim = 0
    SpectralNorm.apply(module, name, dim=dim, eps=eps)
    return module
def remove_spectral_norm(module, name='weight'):
    r"""Removes the spectral normalization reparameterization from a module.
    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
    Example:
        >>> m = spectral_norm(nn.Linear(40, 10))
        >>> remove_spectral_norm(m)
    """
    # Locate the SpectralNorm pre-hook registered under `name`, if any.
    target_key = None
    for key, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            target_key = key
            break
    if target_key is None:
        raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
    # Restore the plain parameter, then drop the hook itself.
    module._forward_pre_hooks[target_key].remove(module)
    del module._forward_pre_hooks[target_key]
    return module
| 6,512 | 38.957055 | 119 | py |
steer | steer-master/ffjord/lib/utils.py | import os
import math
from numbers import Number
import logging
import torch
def makedirs(dirname):
    """Create `dirname` (including missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the previous check-then-create pattern,
    which was racy: another process could create the directory between the
    ``os.path.exists`` check and the ``os.makedirs`` call, crashing this one.
    """
    os.makedirs(dirname, exist_ok=True)
def get_logger(logpath, filepath, package_files=(), displaying=True, saving=True, debug=False):
    """Configure and return the root logger, echoing source files into the log.

    Args:
        logpath: path of the log file appended to when `saving` is True.
        filepath: path of the main script; its full text is logged for
            reproducibility.
        package_files: optional iterable of extra file paths whose contents
            are also logged. (Default changed from the mutable ``[]`` — a
            classic Python pitfall — to an equivalent immutable ``()``; the
            argument is only iterated, never mutated.)
        displaying: if True, also log to the console.
        saving: if True, also log to `logpath`.
        debug: if True, use DEBUG level instead of INFO.

    Returns:
        The configured root ``logging.Logger``.

    NOTE(review): repeated calls keep adding handlers to the root logger,
    duplicating output — presumably this is called once per run; confirm.
    """
    logger = logging.getLogger()
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    if saving:
        info_file_handler = logging.FileHandler(logpath, mode="a")
        info_file_handler.setLevel(level)
        logger.addHandler(info_file_handler)
    if displaying:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        logger.addHandler(console_handler)
    # Record the script (and any extra files) verbatim in the log.
    logger.info(filepath)
    with open(filepath, "r") as f:
        logger.info(f.read())
    for f in package_files:
        logger.info(f)
        with open(f, "r") as package_f:
            logger.info(package_f.read())
    return logger
class AverageMeter(object):
    """Tracks the most recent value and a count-weighted running mean."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Tracks the latest value and its exponential moving average."""
    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()
    def reset(self):
        """Forget all history; the next update seeds the average."""
        self.val = None
        self.avg = 0
    def update(self, val):
        """Fold `val` into the EMA (first value initializes it directly)."""
        if self.val is None:
            new_avg = val
        else:
            new_avg = self.momentum * self.avg + (1 - self.momentum) * val
        self.avg = new_avg
        self.val = val
def inf_generator(iterable):
    """Allows training with DataLoaders in a single infinite loop:
        for i, (x, y) in enumerate(inf_generator(train_loader)):
    Restarts iteration from the beginning whenever `iterable` is exhausted.
    """
    while True:
        for item in iterable:
            yield item
def save_checkpoint(state, save, epoch):
    """Serialize `state` to `<save>/checkpt-XXXX.pth`, creating `save` if needed."""
    if not os.path.exists(save):
        os.makedirs(save)
    target = os.path.join(save, 'checkpt-%04d.pth' % epoch)
    torch.save(state, target)
def isnan(tensor):
    """Elementwise NaN mask: NaN is the only value not equal to itself."""
    return tensor != tensor
def logsumexp(value, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(value))), over all elements when `dim`
    is None, otherwise along dimension `dim` (honoring `keepdim`).
    """
    if dim is None:
        # Global reduction: shift by the max so exp() cannot overflow.
        shift = torch.max(value)
        total = torch.sum(torch.exp(value - shift))
        if isinstance(total, Number):
            return shift + math.log(total)
        else:
            return shift + torch.log(total)
    else:
        # Per-dimension reduction with a broadcastable shift.
        shift, _ = torch.max(value, dim=dim, keepdim=True)
        shifted = value - shift
        if keepdim is False:
            shift = shift.squeeze(dim)
        return shift + torch.log(torch.sum(torch.exp(shifted), dim=dim, keepdim=keepdim))
| 3,046 | 24.822034 | 95 | py |
steer | steer-master/ffjord/lib/odenvp.py | import torch
import torch.nn as nn
import lib.layers as layers
from lib.layers.odefunc import ODEnet
import numpy as np
class ODENVP(nn.Module):
    """
    Real NVP for image data. Will downsample the input until one of the
    dimensions is less than or equal to 4.
    Args:
        input_size (tuple): 4D tuple of the input size.
        n_scale (int): Number of scales for the representation z.
        n_resblocks (int): Length of the resnet for each coupling layer.
    """
    def __init__(
        self,
        input_size,
        n_scale=float('inf'),
        n_blocks=2,
        intermediate_dims=(32,),
        nonlinearity="softplus",
        squash_input=True,
        alpha=0.05,
        cnf_kwargs=None,
    ):
        super(ODENVP, self).__init__()
        # Cap the requested number of scales at what the spatial size allows.
        self.n_scale = min(n_scale, self._calc_n_scale(input_size))
        self.n_blocks = n_blocks
        self.intermediate_dims = intermediate_dims
        self.nonlinearity = nonlinearity
        self.squash_input = squash_input
        self.alpha = alpha
        self.cnf_kwargs = cnf_kwargs if cnf_kwargs else {}
        if not self.n_scale > 0:
            raise ValueError('Could not compute number of scales for input of' 'size (%d,%d,%d,%d)' % input_size)
        self.transforms = self._build_net(input_size)
        # Per-scale output shapes (without the batch dimension).
        self.dims = [o[1:] for o in self.calc_output_size(input_size)]
    def _build_net(self, input_size):
        # One StackedCNFLayers per scale; each squeeze doubles channels
        # twice over (c*2 kept after factor-out) and halves H and W.
        _, c, h, w = input_size
        transforms = []
        for i in range(self.n_scale):
            transforms.append(
                StackedCNFLayers(
                    initial_size=(c, h, w),
                    idims=self.intermediate_dims,
                    squeeze=(i < self.n_scale - 1),  # don't squeeze last layer
                    init_layer=(layers.LogitTransform(self.alpha) if self.alpha > 0 else layers.ZeroMeanTransform())
                    if self.squash_input and i == 0 else None,
                    n_blocks=self.n_blocks,
                    cnf_kwargs=self.cnf_kwargs,
                    nonlinearity=self.nonlinearity,
                )
            )
            c, h, w = c * 2, h // 2, w // 2
        return nn.ModuleList(transforms)
    def get_regularization(self):
        # NOTE(review): self.regularization_fns / self.regularization_coeffs
        # are never assigned in this class — calling this would raise
        # AttributeError; presumably they are attached externally. Confirm.
        if len(self.regularization_fns) == 0:
            return None
        acc_reg_states = tuple([0.] * len(self.regularization_fns))
        for module in self.modules():
            if isinstance(module, layers.CNF):
                acc_reg_states = tuple(
                    acc + reg for acc, reg in zip(acc_reg_states, module.get_regularization_states())
                )
        return sum(state * coeff for state, coeff in zip(acc_reg_states, self.regularization_coeffs))
    def _calc_n_scale(self, input_size):
        # Count how many times H and W can be halved while staying >= 4.
        _, _, h, w = input_size
        n_scale = 0
        while h >= 4 and w >= 4:
            n_scale += 1
            h = h // 2
            w = w // 2
        return n_scale
    def calc_output_size(self, input_size):
        # Shapes of the factored-out tensors at each scale.
        n, c, h, w = input_size
        output_sizes = []
        for i in range(self.n_scale):
            if i < self.n_scale - 1:
                c *= 2
                h //= 2
                w //= 2
                output_sizes.append((n, c, h, w))
            else:
                output_sizes.append((n, c, h, w))
        return tuple(output_sizes)
    def forward(self, x, logpx=None, reverse=False):
        # reverse=False: data -> latent (density); reverse=True: latent -> data.
        if reverse:
            return self._generate(x, logpx)
        else:
            return self._logdensity(x, logpx)
    def _logdensity(self, x, logpx=None):
        _logpx = torch.zeros(x.shape[0], 1).to(x) if logpx is None else logpx
        out = []
        for idx in range(len(self.transforms)):
            x, _logpx = self.transforms[idx].forward(x, _logpx)
            if idx < len(self.transforms) - 1:
                # Factor out half the channels at every scale except the last.
                d = x.size(1) // 2
                x, factor_out = x[:, :d], x[:, d:]
            else:
                # last layer, no factor out
                factor_out = x
            out.append(factor_out)
        # Flatten and concatenate all factored-out pieces into one latent.
        out = [o.view(o.size()[0], -1) for o in out]
        out = torch.cat(out, 1)
        return out if logpx is None else (out, _logpx)
    def _generate(self, z, logpz=None):
        # Split the flat latent back into per-scale chunks, then invert the
        # transforms from coarsest to finest, re-attaching each chunk.
        z = z.view(z.shape[0], -1)
        zs = []
        i = 0
        for dims in self.dims:
            s = np.prod(dims)
            zs.append(z[:, i:i + s])
            i += s
        zs = [_z.view(_z.size()[0], *zsize) for _z, zsize in zip(zs, self.dims)]
        _logpz = torch.zeros(zs[0].shape[0], 1).to(zs[0]) if logpz is None else logpz
        z_prev, _logpz = self.transforms[-1](zs[-1], _logpz, reverse=True)
        for idx in range(len(self.transforms) - 2, -1, -1):
            z_prev = torch.cat((z_prev, zs[idx]), dim=1)
            z_prev, _logpz = self.transforms[idx](z_prev, _logpz, reverse=True)
        return z_prev if logpz is None else (z_prev, _logpz)
class StackedCNFLayers(layers.SequentialFlow):
    """A chain of CNF blocks for one scale: optional input transform,
    `n_blocks` CNFs at the input size, and (when `squeeze` is True) a
    SqueezeLayer followed by `n_blocks` more CNFs at the squeezed size.
    """
    def __init__(
        self,
        initial_size,
        idims=(32,),
        nonlinearity="softplus",
        squeeze=True,
        init_layer=None,
        n_blocks=1,
        cnf_kwargs={},  # NOTE(review): mutable default; safe here since only unpacked, never mutated.
        ):
        strides = tuple([1] + [1 for _ in idims])
        chain = []
        if init_layer is not None:
            chain.append(init_layer)
        def _make_odefunc(size):
            # Build one ODE dynamics network sized for `size` = (c, h, w).
            net = ODEnet(idims, size, strides, True, layer_type="concat", nonlinearity=nonlinearity)
            f = layers.ODEfunc(net)
            return f
        if squeeze:
            c, h, w = initial_size
            # Squeeze trades spatial resolution for channels: (c,h,w) -> (4c, h/2, w/2).
            after_squeeze_size = c * 4, h // 2, w // 2
            pre = [layers.CNF(_make_odefunc(initial_size), **cnf_kwargs) for _ in range(n_blocks)]
            post = [layers.CNF(_make_odefunc(after_squeeze_size), **cnf_kwargs) for _ in range(n_blocks)]
            chain += pre + [layers.SqueezeLayer(2)] + post
        else:
            chain += [layers.CNF(_make_odefunc(initial_size), **cnf_kwargs) for _ in range(n_blocks)]
        super(StackedCNFLayers, self).__init__(chain)
| 6,008 | 34.556213 | 116 | py |
steer | steer-master/ffjord/lib/datasets.py | import torch
class Dataset(object):
    """Wraps a tensor saved with `torch.save`, rescaled from [0, 255] to [0, 1],
    and exposes it with a torchvision-style (item, label) interface where the
    label is always 0.
    """
    def __init__(self, loc, transform=None):
        self.dataset = torch.load(loc).float().div(255)
        self.transform = transform
    def __len__(self):
        return self.dataset.size(0)
    @property
    def ndim(self):
        # Size of the per-example dimension (dim 1 of the stored tensor).
        return self.dataset.size(1)
    def __getitem__(self, index):
        item = self.dataset[index]
        if self.transform is not None:
            item = self.transform(item)
        return item, 0
class CelebA(Dataset):
    """CelebA split loaded from pre-serialized tensors on disk."""
    # Hard-coded locations of the pre-processed train/val tensors.
    TRAIN_LOC = 'data/celeba/celeba_train.pth'
    VAL_LOC = 'data/celeba/celeba_val.pth'
    def __init__(self, train=True, transform=None):
        # `return` of __init__'s None is harmless; kept byte-identical.
        return super(CelebA, self).__init__(self.TRAIN_LOC if train else self.VAL_LOC, transform)
| 725 | 24.928571 | 97 | py |
steer | steer-master/ffjord/lib/multiscale_parallel.py | import torch
import torch.nn as nn
import lib.layers as layers
from lib.layers.odefunc import ODEnet
import numpy as np
class MultiscaleParallelCNF(nn.Module):
    """
    CNF model for image data.
    Squeezes the input into multiple scales, applies different conv-nets at each scale
    and adds the resulting gradients
    Will downsample the input until one of the
    dimensions is less than or equal to 4.
    Args:
        input_size (tuple): 4D tuple of the input size.
        n_scale (int): Number of scales for the representation z.
        n_resblocks (int): Length of the resnet for each coupling layer.
    """
    def __init__(
        self,
        input_size,
        n_scale=float('inf'),
        n_blocks=1,
        intermediate_dims=(32,),
        alpha=-1,
        time_length=1.,
    ):
        super(MultiscaleParallelCNF, self).__init__()
        print(input_size)
        # Cap the requested number of scales at what the spatial size allows.
        self.n_scale = min(n_scale, self._calc_n_scale(input_size))
        self.n_blocks = n_blocks
        self.intermediate_dims = intermediate_dims
        self.alpha = alpha
        self.time_length = time_length
        if not self.n_scale > 0:
            raise ValueError('Could not compute number of scales for input of' 'size (%d,%d,%d,%d)' % input_size)
        self.transforms = self._build_net(input_size)
    def _build_net(self, input_size):
        # Single ParallelCNFLayers stack; the per-scale split happens inside it.
        _, c, h, w = input_size
        transforms = []
        transforms.append(
            ParallelCNFLayers(
                initial_size=(c, h, w),
                idims=self.intermediate_dims,
                init_layer=(layers.LogitTransform(self.alpha) if self.alpha > 0 else layers.ZeroMeanTransform()),
                n_blocks=self.n_blocks,
                time_length=self.time_length
            )
        )
        return nn.ModuleList(transforms)
    def get_regularization(self):
        # NOTE(review): self.regularization_fns / self.regularization_coeffs
        # are never assigned in this class — calling this would raise
        # AttributeError; presumably they are attached externally. Confirm.
        if len(self.regularization_fns) == 0:
            return None
        acc_reg_states = tuple([0.] * len(self.regularization_fns))
        for module in self.modules():
            if isinstance(module, layers.CNF):
                acc_reg_states = tuple(
                    acc + reg for acc, reg in zip(acc_reg_states, module.get_regularization_states())
                )
        return sum(state * coeff for state, coeff in zip(acc_reg_states, self.regularization_coeffs))
    def _calc_n_scale(self, input_size):
        # Count how many times H and W can be halved while staying >= 4.
        _, _, h, w = input_size
        n_scale = 0
        while h >= 4 and w >= 4:
            n_scale += 1
            h = h // 2
            w = w // 2
        return n_scale
    def calc_output_size(self, input_size):
        n, c, h, w = input_size
        output_sizes = []
        for i in range(self.n_scale):
            if i < self.n_scale - 1:
                c *= 2
                h //= 2
                w //= 2
                output_sizes.append((n, c, h, w))
            else:
                output_sizes.append((n, c, h, w))
        return tuple(output_sizes)
    def forward(self, x, logpx=None, reverse=False):
        # reverse=False: data -> latent (density); reverse=True: latent -> data.
        if reverse:
            return self._generate(x, logpx)
        else:
            return self._logdensity(x, logpx)
    def _logdensity(self, x, logpx=None):
        _logpx = torch.zeros(x.shape[0], 1).to(x) if logpx is None else logpx
        for idx in range(len(self.transforms)):
            x, _logpx = self.transforms[idx].forward(x, _logpx)
        return x if logpx is None else (x, _logpx)
    def _generate(self, z, logpz=None):
        _logpz = torch.zeros(z.shape[0], 1).to(z) if logpz is None else logpz
        for idx in reversed(range(len(self.transforms))):
            z, _logpz = self.transforms[idx](z, _logpz, reverse=True)
        return z if logpz is None else (z, _logpz)
class ParallelSumModules(nn.Module):
    """Applies every sub-module to the same (t, y) input and sums the outputs."""
    def __init__(self, models):
        super(ParallelSumModules, self).__init__()
        self.models = nn.ModuleList(models)
        # Flag recorded for callers; True when no GPU is available.
        self.cpu = not torch.cuda.is_available()
    def forward(self, t, y):
        outputs = [model(t, y) for model in self.models]
        return sum(outputs)
class ParallelCNFLayers(layers.SequentialFlow):
    """A chain of CNF blocks whose dynamics are the sum of `scales` ODEnets,
    each operating at a progressively squeezed resolution of the input.
    """
    def __init__(
        self,
        initial_size,
        idims=(32,),
        scales=4,
        init_layer=None,
        n_blocks=1,
        time_length=1.,
    ):
        strides = tuple([1] + [1 for _ in idims])
        chain = []
        if init_layer is not None:
            chain.append(init_layer)
        # Shape after squeezing `s` times: channels x4 per squeeze, H and W halved.
        get_size = lambda s: (initial_size[0] * (4**s), initial_size[1] // (2**s), initial_size[2] // (2**s))
        def _make_odefunc():
            # One ODEnet per scale; their outputs are summed by ParallelSumModules.
            nets = [ODEnet(idims, get_size(scale), strides, True, layer_type="concat", num_squeeze=scale)
                    for scale in range(scales)]
            net = ParallelSumModules(nets)
            f = layers.ODEfunc(net)
            return f
        chain += [layers.CNF(_make_odefunc(), T=time_length) for _ in range(n_blocks)]
        super(ParallelCNFLayers, self).__init__(chain)
if __name__ == "__main__":
    # Smoke test: build the multiscale CNF and run one forward pass on random
    # data (CUDA if available, CPU otherwise).
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cnfs = MultiscaleParallelCNF((13, 3, 32, 32)).to(device)
    t = torch.randn(13, 3, 32, 32).to(device)
    out = cnfs(t, logpx=None)
    print("done")
steer | steer-master/ffjord/lib/toy_data.py | import numpy as np
import sklearn
import sklearn.datasets
from sklearn.utils import shuffle as util_shuffle
# Dataset iterator
def inf_train_gen(data, rng=None, batch_size=200):
    """Draw one batch of samples from a named 2-D toy distribution.

    Args:
        data (str): dataset name; unknown names fall back to "8gaussians".
        rng (np.random.RandomState, optional): source of randomness; a fresh
            unseeded RandomState is created when omitted. All branches now
            draw from `rng` (previously "2spirals" and "checkerboard" used
            the global `np.random`, and the sklearn generators used no seed,
            so passing a seeded rng did not make those batches reproducible).
        batch_size (int): number of samples to return.

    Returns:
        np.ndarray of shape (batch_size, 2).
    """
    if rng is None:
        rng = np.random.RandomState()
    if data == "swissroll":
        data = sklearn.datasets.make_swiss_roll(n_samples=batch_size, noise=1.0, random_state=rng)[0]
        data = data.astype("float32")[:, [0, 2]]
        data /= 5
        return data
    elif data == "circles":
        data = sklearn.datasets.make_circles(n_samples=batch_size, factor=.5, noise=0.08, random_state=rng)[0]
        data = data.astype("float32")
        data *= 3
        return data
    elif data == "rings":
        n_samples4 = n_samples3 = n_samples2 = batch_size // 4
        n_samples1 = batch_size - n_samples4 - n_samples3 - n_samples2
        # so as not to have the first point = last point, we set endpoint=False
        linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
        linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
        linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
        linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)
        circ4_x = np.cos(linspace4)
        circ4_y = np.sin(linspace4)
        # BUG FIX: circ3_x previously used linspace4 — harmless only because
        # linspace3 and linspace4 are currently identical in length.
        circ3_x = np.cos(linspace3) * 0.75
        circ3_y = np.sin(linspace3) * 0.75
        circ2_x = np.cos(linspace2) * 0.5
        circ2_y = np.sin(linspace2) * 0.5
        circ1_x = np.cos(linspace1) * 0.25
        circ1_y = np.sin(linspace1) * 0.25
        X = np.vstack([
            np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
            np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
        ]).T * 3.0
        X = util_shuffle(X, random_state=rng)
        # Add noise
        X = X + rng.normal(scale=0.08, size=X.shape)
        return X.astype("float32")
    elif data == "moons":
        data = sklearn.datasets.make_moons(n_samples=batch_size, noise=0.1, random_state=rng)[0]
        data = data.astype("float32")
        data = data * 2 + np.array([-1, -0.2])
        return data
    elif data == "8gaussians":
        scale = 4.
        centers = [(1, 0), (-1, 0), (0, 1), (0, -1), (1. / np.sqrt(2), 1. / np.sqrt(2)),
                   (1. / np.sqrt(2), -1. / np.sqrt(2)), (-1. / np.sqrt(2),
                                                         1. / np.sqrt(2)), (-1. / np.sqrt(2), -1. / np.sqrt(2))]
        centers = [(scale * x, scale * y) for x, y in centers]
        dataset = []
        for i in range(batch_size):
            # Gaussian blob around a randomly chosen center.
            point = rng.randn(2) * 0.5
            idx = rng.randint(8)
            center = centers[idx]
            point[0] += center[0]
            point[1] += center[1]
            dataset.append(point)
        dataset = np.array(dataset, dtype="float32")
        dataset /= 1.414
        return dataset
    elif data == "pinwheel":
        radial_std = 0.3
        tangential_std = 0.1
        num_classes = 5
        num_per_class = batch_size // 5
        rate = 0.25
        rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)
        features = rng.randn(num_classes*num_per_class, 2) \
            * np.array([radial_std, tangential_std])
        features[:, 0] += 1.
        labels = np.repeat(np.arange(num_classes), num_per_class)
        # Rotate each class blob by an angle that grows with radius.
        angles = rads[labels] + rate * np.exp(features[:, 0])
        rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
        rotations = np.reshape(rotations.T, (-1, 2, 2))
        return 2 * rng.permutation(np.einsum("ti,tij->tj", features, rotations))
    elif data == "2spirals":
        # BUG FIX: previously drew from the global np.random, ignoring `rng`.
        n = np.sqrt(rng.rand(batch_size // 2, 1)) * 540 * (2 * np.pi) / 360
        d1x = -np.cos(n) * n + rng.rand(batch_size // 2, 1) * 0.5
        d1y = np.sin(n) * n + rng.rand(batch_size // 2, 1) * 0.5
        x = np.vstack((np.hstack((d1x, d1y)), np.hstack((-d1x, -d1y)))) / 3
        x += rng.randn(*x.shape) * 0.1
        return x
    elif data == "checkerboard":
        # BUG FIX: previously drew from the global np.random, ignoring `rng`.
        x1 = rng.rand(batch_size) * 4 - 2
        x2_ = rng.rand(batch_size) - rng.randint(0, 2, batch_size) * 2
        x2 = x2_ + (np.floor(x1) % 2)
        return np.concatenate([x1[:, None], x2[:, None]], 1) * 2
    elif data == "line":
        x = rng.rand(batch_size) * 5 - 2.5
        y = x
        return np.stack((x, y), 1)
    elif data == "cos":
        x = rng.rand(batch_size) * 5 - 2.5
        y = np.sin(x) * 2.5
        return np.stack((x, y), 1)
    else:
        # Unknown name: fall back to the default mixture.
        return inf_train_gen("8gaussians", rng, batch_size)
| 4,517 | 36.032787 | 112 | py |
steer | steer-master/ffjord/lib/custom_optimizers.py | import math
import torch
from torch.optim.optimizer import Optimizer
class Adam(Optimizer):
    r"""Implements Adam algorithm.
    It has been proposed in `Adam: A Method for Stochastic Optimization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Older checkpoints may lack the `amsgrad` key; default it on load.
        super(Adam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # (Updated to the keyword `alpha=` / `value=` tensor API; the
                # old positional-scalar overloads were removed from PyTorch.)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1**state['step']
                bias_correction2 = 1 - beta2**state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                if group['weight_decay'] != 0:
                    # BUG FIX: this previously used the out-of-place `p.data.add(...)`
                    # whose result was discarded, so weight decay was never applied.
                    # Decoupled decay: p <- p * (1 - step_size * weight_decay).
                    p.data.mul_(1 - step_size * group['weight_decay'])
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
| 4,597 | 41.574074 | 116 | py |
steer | steer-master/ffjord/lib/visualize_flow.py | import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import torch
LOW = -4
HIGH = 4
def plt_potential_func(potential, ax, npts=100, title="$p(x)$"):
    """
    Plot exp(-U(z)) over a [LOW, HIGH]^2 grid on the given axes.
    Args:
        potential: computes U(z_k) given z_k
    """
    xside = np.linspace(LOW, HIGH, npts)
    yside = np.linspace(LOW, HIGH, npts)
    xx, yy = np.meshgrid(xside, yside)
    # Flatten grid to an (npts*npts, 2) batch of points for `potential`.
    z = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
    z = torch.Tensor(z)
    u = potential(z).cpu().numpy()
    # Unnormalized density from the potential: p ∝ exp(-U).
    p = np.exp(-u).reshape(npts, npts)
    plt.pcolormesh(xx, yy, p)
    ax.invert_yaxis()
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_title(title)
def plt_flow(prior_logdensity, transform, ax, npts=100, title="$q(x)$", device="cpu"):
    """
    Visualize the pushforward density by transforming a grid of prior points.
    Args:
        transform: computes z_k and log(q_k) given z_0
    """
    side = np.linspace(LOW, HIGH, npts)
    xx, yy = np.meshgrid(side, side)
    z = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
    z = torch.tensor(z, requires_grad=True).type(torch.float32).to(device)
    # Per-point prior log-density, summed over dimensions.
    logqz = prior_logdensity(z)
    logqz = torch.sum(logqz, dim=1)[:, None]
    # Push the grid (and its log-density) through the flow.
    z, logqz = transform(z, logqz)
    logqz = torch.sum(logqz, dim=1)[:, None]
    xx = z[:, 0].cpu().numpy().reshape(npts, npts)
    yy = z[:, 1].cpu().numpy().reshape(npts, npts)
    qz = np.exp(logqz.cpu().numpy()).reshape(npts, npts)
    plt.pcolormesh(xx, yy, qz)
    ax.set_xlim(LOW, HIGH)
    ax.set_ylim(LOW, HIGH)
    # Paint the background with the colormap's zero color so gaps match.
    cmap = matplotlib.cm.get_cmap(None)
    ax.set_facecolor(cmap(0.))
    ax.invert_yaxis()
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_title(title)
def plt_flow_density(prior_logdensity, inverse_transform, ax, npts=100, memory=100, title="$q(x)$", device="cpu"):
    """
    Draws the model density at grid points via the inverse flow and the
    change-of-variables formula: log p(x) = log p(z) - delta_logp.
    Processes the grid in chunks of memory**2 points to bound peak memory.
    """
    side = np.linspace(LOW, HIGH, npts)
    xx, yy = np.meshgrid(side, side)
    x = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
    x = torch.from_numpy(x).type(torch.float32).to(device)
    zeros = torch.zeros(x.shape[0], 1).to(x)
    z, delta_logp = [], []
    inds = torch.arange(0, x.shape[0]).to(torch.int64)
    for ii in torch.split(inds, int(memory**2)):
        z_, delta_logp_ = inverse_transform(x[ii], zeros[ii])
        z.append(z_)
        delta_logp.append(delta_logp_)
    z = torch.cat(z, 0)
    delta_logp = torch.cat(delta_logp, 0)
    logpz = prior_logdensity(z).view(z.shape[0], -1).sum(1, keepdim=True)  # logp(z)
    logpx = logpz - delta_logp
    px = np.exp(logpx.cpu().numpy()).reshape(npts, npts)
    ax.imshow(px)
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_title(title)
def plt_flow_samples(prior_sample, transform, ax, npts=100, memory=100, title="$x ~ q(x)$", device="cpu"):
    """
    Draws a 2D histogram of npts*npts base samples pushed through the flow.
    Samples are transformed in chunks of memory**2 to bound peak memory.
    """
    z = prior_sample(npts * npts, 2).type(torch.float32).to(device)
    zk = []
    inds = torch.arange(0, z.shape[0]).to(torch.int64)
    for ii in torch.split(inds, int(memory**2)):
        zk.append(transform(z[ii]))
    zk = torch.cat(zk, 0).cpu().numpy()
    ax.hist2d(zk[:, 0], zk[:, 1], range=[[LOW, HIGH], [LOW, HIGH]], bins=npts)
    ax.invert_yaxis()
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_title(title)
def plt_samples(samples, ax, npts=100, title="$x ~ p(x)$"):
    """Draws a 2D histogram of given data samples over [LOW, HIGH]^2."""
    ax.hist2d(samples[:, 0], samples[:, 1], range=[[LOW, HIGH], [LOW, HIGH]], bins=npts)
    ax.invert_yaxis()
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_title(title)
def visualize_transform(
    potential_or_samples, prior_sample, prior_density, transform=None, inverse_transform=None, samples=True, npts=100,
    memory=100, device="cpu"
):
    """Produces visualization for the model density and samples from the model.

    Panel 1: the target (data samples, or a potential function when samples=False).
    Panel 2: the model density (via forward transform, or inverse if given).
    Panel 3: samples drawn from the model (only when a forward transform exists).
    """
    plt.clf()
    ax = plt.subplot(1, 3, 1, aspect="equal")
    if samples:
        plt_samples(potential_or_samples, ax, npts=npts)
    else:
        plt_potential_func(potential_or_samples, ax, npts=npts)
    ax = plt.subplot(1, 3, 2, aspect="equal")
    if inverse_transform is None:
        plt_flow(prior_density, transform, ax, npts=npts, device=device)
    else:
        plt_flow_density(prior_density, inverse_transform, ax, npts=npts, memory=memory, device=device)
    ax = plt.subplot(1, 3, 3, aspect="equal")
    if transform is not None:
        plt_flow_samples(prior_sample, transform, ax, npts=npts, memory=memory, device=device)
| 4,341 | 31.646617 | 118 | py |
steer | steer-master/ffjord/lib/layers/squeeze.py | import torch.nn as nn
__all__ = ['SqueezeLayer']
class SqueezeLayer(nn.Module):
    """Space-to-depth flow layer: forward squeezes [C, H*r, W*r] -> [C*r^2, H, W];
    reverse unsqueezes back. A pure reshuffle, so log-density is unchanged."""
    def __init__(self, downscale_factor):
        super(SqueezeLayer, self).__init__()
        self.downscale_factor = downscale_factor
    def forward(self, x, logpx=None, reverse=False):
        if reverse:
            return self._upsample(x, logpx)
        else:
            return self._downsample(x, logpx)
    def _downsample(self, x, logpx=None):
        squeeze_x = squeeze(x, self.downscale_factor)
        if logpx is None:
            return squeeze_x
        else:
            # Permutation of entries: unit Jacobian, logpx passes through.
            return squeeze_x, logpx
    def _upsample(self, y, logpy=None):
        unsqueeze_y = unsqueeze(y, self.downscale_factor)
        if logpy is None:
            return unsqueeze_y
        else:
            return unsqueeze_y, logpy
def unsqueeze(input, upscale_factor=2):
    """Depth-to-space reshape: [:, C*r^2, H, W] -> [:, C, H*r, W*r]."""
    r = upscale_factor
    b, c_in, h, w = input.size()
    c_out = c_in // (r * r)
    # Split the channel axis into (c_out, r, r), then interleave the two r
    # axes with the spatial axes so each r-block lands next to its pixel.
    blocks = input.contiguous().view(b, c_out, r, r, h, w)
    interleaved = blocks.permute(0, 1, 4, 2, 5, 3).contiguous()
    return interleaved.view(b, c_out, h * r, w * r)
def squeeze(input, downscale_factor=2):
    """Space-to-depth reshape: [:, C, H*r, W*r] -> [:, C*r^2, H, W]."""
    r = downscale_factor
    b, c, h, w = input.size()
    oh, ow = h // r, w // r
    # Carve each spatial axis into (out, r), then move the two r axes in
    # front of the spatial axes so they become extra channels.
    carved = input.contiguous().view(b, c, oh, r, ow, r)
    return carved.permute(0, 1, 3, 5, 2, 4).contiguous().view(b, c * r * r, oh, ow)
| 1,955 | 29.5625 | 119 | py |
steer | steer-master/ffjord/lib/layers/container.py | import torch.nn as nn
class SequentialFlow(nn.Module):
    """A generalized nn.Sequential container for normalizing flows.

    Each child layer must accept (x, logpx, reverse). With reverse=True the
    chain is traversed back-to-front and each layer inverts itself. `inds`
    optionally restricts/overrides the traversal order.
    """
    def __init__(self, layersList):
        super(SequentialFlow, self).__init__()
        self.chain = nn.ModuleList(layersList)
    def forward(self, x, logpx=None, reverse=False, inds=None):
        if inds is None:
            if reverse:
                inds = range(len(self.chain) - 1, -1, -1)
            else:
                inds = range(len(self.chain))
        if logpx is None:
            # Transform only; skip density bookkeeping.
            for i in inds:
                x = self.chain[i](x, reverse=reverse)
            return x
        else:
            for i in inds:
                x, logpx = self.chain[i](x, logpx, reverse=reverse)
            return x, logpx
| 766 | 27.407407 | 67 | py |
steer | steer-master/ffjord/lib/layers/norm_flows.py | import math
import torch
import torch.nn as nn
from torch.autograd import grad
class PlanarFlow(nn.Module):
    """Planar normalizing flow (Rezende & Mohamed, 2015).

    Applies f(z) = z + u * tanh(w^T z + b) with log-det correction
    log|1 + u^T psi(z)|, where psi(z) = tanh'(w^T z + b) w.
    """

    def __init__(self, nd=1):
        """
        Args:
            nd: dimensionality of the data this flow transforms.
        """
        super(PlanarFlow, self).__init__()
        self.nd = nd
        self.activation = torch.tanh

        self.register_parameter('u', nn.Parameter(torch.randn(self.nd)))
        self.register_parameter('w', nn.Parameter(torch.randn(self.nd)))
        self.register_parameter('b', nn.Parameter(torch.randn(1)))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.nd)
        self.u.data.uniform_(-stdv, stdv)
        self.w.data.uniform_(-stdv, stdv)
        self.b.data.fill_(0)
        self.make_invertible()

    def make_invertible(self):
        """Shift u along w so that u^T w >= -1, which keeps f invertible."""
        u = self.u.data
        w = self.w.data
        dot = torch.dot(u, w)
        m = -1 + math.log(1 + math.exp(dot))
        # NOTE(review): divides by ||w|| rather than ||w||^2; kept as-is since
        # changing it would alter trained behavior -- confirm against the paper.
        du = (m - dot) / torch.norm(w) * w
        u = u + du
        self.u.data = u

    def forward(self, z, logp=None, reverse=False):
        """Computes f(z) and log q(f(z)).

        BUG FIX: removed the statement `logp - torch.log(self._detgrad(z) + 1e-8)`:
        its result was discarded, and it raised TypeError (None - Tensor) whenever
        forward was called without `logp`.
        """
        assert not reverse, 'Planar normalizing flow cannot be reversed.'
        h = self.activation(torch.mm(z, self.w.view(self.nd, 1)) + self.b)
        z = z + self.u.expand_as(z) * h
        # NOTE(review): `sample` applies the planar map again, so `f` is the map
        # applied twice to the input while `qf` uses the once-mapped z. This
        # mirrors the original control flow; confirm the intent upstream.
        f = self.sample(z)
        if logp is not None:
            qf = self.log_density(z, logp)
            return f, qf
        else:
            return f

    def sample(self, z):
        """Computes f(z)"""
        h = self.activation(torch.mm(z, self.w.view(self.nd, 1)) + self.b)
        output = z + self.u.expand_as(z) * h
        return output

    def _detgrad(self, z):
        """Computes |det df/dz|"""
        with torch.enable_grad():
            z = z.requires_grad_(True)
            h = self.activation(torch.mm(z, self.w.view(self.nd, 1)) + self.b)
            psi = grad(h, z, grad_outputs=torch.ones_like(h), create_graph=True, only_inputs=True)[0]
        u_dot_psi = torch.mm(psi, self.u.view(self.nd, 1))
        detgrad = 1 + u_dot_psi
        return detgrad

    def log_density(self, z, logqz):
        """Computes log density of the flow given the log density of z"""
        return logqz - torch.log(self._detgrad(z) + 1e-8)
| 2,240 | 31.014286 | 101 | py |
steer | steer-master/ffjord/lib/layers/cnf.py | import torch
import torch.nn as nn
#from torchdiffeq import odeint_adjoint_stochastic_end_v2
from torchdiffeq import odeint_adjoint_stochastic_end_v3
from torchdiffeq import odeint_adjoint_stochastic_end_normal
from torchdiffeq import odeint_adjoint as odeint
#from torchdiffeq import odeint
from .wrappers.cnf_regularization import RegularizedODEfunc
__all__ = ["CNF"]
class CNF(nn.Module):
    """Continuous normalizing flow: integrates `odefunc` from t=0 to t=T.

    The end time is stored as its square root so it stays positive when
    learned (train_T=True). During training a stochastic-endpoint adjoint
    solver is used; at test time a plain adjoint odeint is used.
    """
    def __init__(self, odefunc, T=1.0, train_T=False, regularization_fns=None, solver='dopri5', atol=1e-5, rtol=1e-5):
        super(CNF, self).__init__()
        if train_T:
            self.register_parameter("sqrt_end_time", nn.Parameter(torch.sqrt(torch.tensor(T))))
        else:
            self.register_buffer("sqrt_end_time", torch.sqrt(torch.tensor(T)))
        nreg = 0
        if regularization_fns is not None:
            # Wrap so extra regularization quantities are integrated alongside the state.
            odefunc = RegularizedODEfunc(odefunc, regularization_fns)
            nreg = len(regularization_fns)
        self.odefunc = odefunc
        self.nreg = nreg
        self.regularization_states = None
        self.solver = solver
        self.atol = atol
        self.rtol = rtol
        self.test_solver = solver
        self.test_atol = atol
        self.test_rtol = rtol
        self.solver_options = {}
    def forward(self, z, logpz=None, integration_times=None, reverse=False):
        #print("integration_times")
        #print(integration_times)
        if logpz is None:
            _logpz = torch.zeros(z.shape[0], 1).to(z)
        else:
            _logpz = logpz
        if integration_times is None:
            # end time T = sqrt_end_time^2 (squaring keeps a learned T positive)
            integration_times = torch.tensor([0.0, self.sqrt_end_time * self.sqrt_end_time]).to(z)
        if reverse:
            integration_times = _flip(integration_times, 0)
        # Refresh the odefunc statistics.
        self.odefunc.before_odeint()
        # Add regularization states.
        reg_states = tuple(torch.tensor(0).to(z) for _ in range(self.nreg))
        if self.training:
            state_t = odeint_adjoint_stochastic_end_v3(
            #state_t = odeint_adjoint_stochastic_end_normal(
            #state_t = odeint(
                self.odefunc,
                (z, _logpz) + reg_states,
                integration_times.to(z),
                # Huge tolerance (1e20) on the regularization states: their values
                # should not influence the adaptive step-size controller.
                atol=[self.atol, self.atol] + [1e20] * len(reg_states) if self.solver == 'dopri5' else self.atol,
                rtol=[self.rtol, self.rtol] + [1e20] * len(reg_states) if self.solver == 'dopri5' else self.rtol,
                method=self.solver,
                options=self.solver_options,
                #std = 0.25
                min_length = 0.25 #0.001
            )
        else:
            state_t = odeint(
                self.odefunc,
                (z, _logpz),
                integration_times.to(z),
                atol=self.test_atol,
                rtol=self.test_rtol,
                method=self.test_solver,
            )
        if len(integration_times) == 2:
            # Only start/end requested: keep the value at the end time.
            state_t = tuple(s[1] for s in state_t)
        z_t, logpz_t = state_t[:2]
        self.regularization_states = state_t[2:]
        if logpz is not None:
            return z_t, logpz_t
        else:
            return z_t
    def get_regularization_states(self):
        # One-shot accessor: consumed states are cleared.
        reg_states = self.regularization_states
        self.regularization_states = None
        return reg_states
    def num_evals(self):
        return self.odefunc._num_evals.item()
def _flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
return x[tuple(indices)]
| 3,561 | 32.92381 | 118 | py |
steer | steer-master/ffjord/lib/layers/odefunc.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import diffeq_layers
from .squeeze import squeeze, unsqueeze
__all__ = ["ODEnet", "AutoencoderDiffEqNet", "ODEfunc", "AutoencoderODEfunc"]
def divergence_bf(dx, y, **unused_kwargs):
    """Exact divergence tr(d dx / dy), one autograd pass per dimension (brute force)."""
    div = 0.
    for d in range(y.shape[1]):
        grad_d = torch.autograd.grad(dx[:, d].sum(), y, create_graph=True)[0]
        div = div + grad_d.contiguous()[:, d].contiguous()
    return div.contiguous()
# def divergence_bf(f, y, **unused_kwargs):
# jac = _get_minibatch_jacobian(f, y)
# diagonal = jac.view(jac.shape[0], -1)[:, ::jac.shape[1]]
# return torch.sum(diagonal, 1)
def _get_minibatch_jacobian(y, x):
"""Computes the Jacobian of y wrt x assuming minibatch-mode.
Args:
y: (N, ...) with a total of D_y elements in ...
x: (N, ...) with a total of D_x elements in ...
Returns:
The minibatch Jacobian matrix of shape (N, D_y, D_x)
"""
assert y.shape[0] == x.shape[0]
y = y.view(y.shape[0], -1)
# Compute Jacobian row by row.
jac = []
for j in range(y.shape[1]):
dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,
create_graph=True)[0].view(x.shape[0], -1)
jac.append(torch.unsqueeze(dy_j_dx, 1))
jac = torch.cat(jac, 1)
return jac
def divergence_approx(f, y, e=None):
    """Hutchinson trace estimator: e^T (df/dy) e approximates tr(df/dy) in one VJP."""
    vjp = torch.autograd.grad(f, y, e, create_graph=True)[0]
    return (vjp * e).view(y.shape[0], -1).sum(dim=1)
def sample_rademacher_like(y):
    """Draw +/-1 entries with equal probability, matching y's shape, dtype and device."""
    bits = torch.randint(low=0, high=2, size=y.shape).to(y)
    return bits * 2 - 1
def sample_gaussian_like(y):
    """Draw standard-normal noise matching y's shape, dtype and device."""
    return torch.randn_like(y)
class Swish(nn.Module):
    """Swish activation x * sigmoid(beta * x) with a learnable scalar beta."""

    def __init__(self):
        super(Swish, self).__init__()
        # beta = 1 recovers the standard swish/SiLU at initialization.
        self.beta = nn.Parameter(torch.tensor(1.0))

    def forward(self, x):
        gate = torch.sigmoid(self.beta * x)
        return gate * x
class Lambda(nn.Module):
    """Wraps an arbitrary callable ``f`` so it can be used as an nn.Module."""
    def __init__(self, f):
        super(Lambda, self).__init__()
        self.f = f
    def forward(self, x):
        return self.f(x)
# Registry of activation modules selectable by name (looked up by ODEnet).
# Built once at import time, so the same module instance is shared by every
# layer that selects the same key.
NONLINEARITIES = {
    "tanh": nn.Tanh(),
    "relu": nn.ReLU(),
    "softplus": nn.Softplus(),
    "elu": nn.ELU(),
    "swish": Swish(),
    "square": Lambda(lambda x: x**2),
    "identity": Lambda(lambda x: x),
}
class ODEnet(nn.Module):
    """
    Helper class to make neural nets for use in continuous normalizing flows.

    Builds a stack of time-conditioned layers (conv or linear, chosen by
    `layer_type`) mapping the state back to its own shape, so the output can
    serve as dstate/dt. Optionally squeezes the input `num_squeeze` times
    before the stack and unsqueezes after.
    """
    def __init__(
        self, hidden_dims, input_shape, strides, conv, layer_type="concat", nonlinearity="softplus", num_squeeze=0
    ):
        super(ODEnet, self).__init__()
        self.num_squeeze = num_squeeze
        if conv:
            assert len(strides) == len(hidden_dims) + 1
            base_layer = {
                "ignore": diffeq_layers.IgnoreConv2d,
                "hyper": diffeq_layers.HyperConv2d,
                "squash": diffeq_layers.SquashConv2d,
                "concat": diffeq_layers.ConcatConv2d,
                "concat_v2": diffeq_layers.ConcatConv2d_v2,
                "concatsquash": diffeq_layers.ConcatSquashConv2d,
                "blend": diffeq_layers.BlendConv2d,
                "concatcoord": diffeq_layers.ConcatCoordConv2d,
            }[layer_type]
        else:
            # Linear stacks ignore strides entirely.
            strides = [None] * (len(hidden_dims) + 1)
            base_layer = {
                "ignore": diffeq_layers.IgnoreLinear,
                "hyper": diffeq_layers.HyperLinear,
                "squash": diffeq_layers.SquashLinear,
                "concat": diffeq_layers.ConcatLinear,
                "concat_v2": diffeq_layers.ConcatLinear_v2,
                "concatsquash": diffeq_layers.ConcatSquashLinear,
                "blend": diffeq_layers.BlendLinear,
                "concatcoord": diffeq_layers.ConcatLinear,
            }[layer_type]
        # build layers and add them
        layers = []
        activation_fns = []
        hidden_shape = input_shape
        # Final layer maps back to input_shape[0] channels so dy/dt matches y.
        for dim_out, stride in zip(hidden_dims + (input_shape[0],), strides):
            if stride is None:
                layer_kwargs = {}
            elif stride == 1:
                layer_kwargs = {"ksize": 3, "stride": 1, "padding": 1, "transpose": False}
            elif stride == 2:
                layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": False}
            elif stride == -2:
                # Negative stride denotes a transposed (upsampling) conv.
                layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": True}
            else:
                raise ValueError('Unsupported stride: {}'.format(stride))
            layer = base_layer(hidden_shape[0], dim_out, **layer_kwargs)
            layers.append(layer)
            activation_fns.append(NONLINEARITIES[nonlinearity])
            # Track the running feature-map shape for the next layer's input.
            hidden_shape = list(copy.copy(hidden_shape))
            hidden_shape[0] = dim_out
            if stride == 2:
                hidden_shape[1], hidden_shape[2] = hidden_shape[1] // 2, hidden_shape[2] // 2
            elif stride == -2:
                hidden_shape[1], hidden_shape[2] = hidden_shape[1] * 2, hidden_shape[2] * 2
        self.layers = nn.ModuleList(layers)
        # No activation after the last layer.
        self.activation_fns = nn.ModuleList(activation_fns[:-1])
    def forward(self, t, y):
        dx = y
        # squeeze
        for _ in range(self.num_squeeze):
            dx = squeeze(dx, 2)
        for l, layer in enumerate(self.layers):
            dx = layer(t, dx)
            # if not last layer, use nonlinearity
            if l < len(self.layers) - 1:
                dx = self.activation_fns[l](dx)
        # unsqueeze
        for _ in range(self.num_squeeze):
            dx = unsqueeze(dx, 2)
        return dx
class AutoencoderDiffEqNet(nn.Module):
    """
    Helper class to make neural nets for use in continuous normalizing flows.

    Splits the layer stack into an encoder and a decoder around the middle
    hidden layer; forward returns both the bottleneck h and the dynamics dx,
    which AutoencoderODEfunc uses to chain VJPs through the bottleneck.
    """
    def __init__(self, hidden_dims, input_shape, strides, conv, layer_type="concat", nonlinearity="softplus"):
        super(AutoencoderDiffEqNet, self).__init__()
        assert layer_type in ("ignore", "hyper", "concat", "concatcoord", "blend")
        assert nonlinearity in ("tanh", "relu", "softplus", "elu")
        self.nonlinearity = {"tanh": F.tanh, "relu": F.relu, "softplus": F.softplus, "elu": F.elu}[nonlinearity]
        if conv:
            assert len(strides) == len(hidden_dims) + 1
            base_layer = {
                "ignore": diffeq_layers.IgnoreConv2d,
                "hyper": diffeq_layers.HyperConv2d,
                "squash": diffeq_layers.SquashConv2d,
                "concat": diffeq_layers.ConcatConv2d,
                "blend": diffeq_layers.BlendConv2d,
                "concatcoord": diffeq_layers.ConcatCoordConv2d,
            }[layer_type]
        else:
            strides = [None] * (len(hidden_dims) + 1)
            base_layer = {
                "ignore": diffeq_layers.IgnoreLinear,
                "hyper": diffeq_layers.HyperLinear,
                "squash": diffeq_layers.SquashLinear,
                "concat": diffeq_layers.ConcatLinear,
                "blend": diffeq_layers.BlendLinear,
                "concatcoord": diffeq_layers.ConcatLinear,
            }[layer_type]
        # build layers and add them
        encoder_layers = []
        decoder_layers = []
        hidden_shape = input_shape
        for i, (dim_out, stride) in enumerate(zip(hidden_dims + (input_shape[0],), strides)):
            # First half (inclusive of the middle) goes to the encoder.
            if i <= len(hidden_dims) // 2:
                layers = encoder_layers
            else:
                layers = decoder_layers
            if stride is None:
                layer_kwargs = {}
            elif stride == 1:
                layer_kwargs = {"ksize": 3, "stride": 1, "padding": 1, "transpose": False}
            elif stride == 2:
                layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": False}
            elif stride == -2:
                layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": True}
            else:
                raise ValueError('Unsupported stride: {}'.format(stride))
            layers.append(base_layer(hidden_shape[0], dim_out, **layer_kwargs))
            hidden_shape = list(copy.copy(hidden_shape))
            hidden_shape[0] = dim_out
            if stride == 2:
                hidden_shape[1], hidden_shape[2] = hidden_shape[1] // 2, hidden_shape[2] // 2
            elif stride == -2:
                hidden_shape[1], hidden_shape[2] = hidden_shape[1] * 2, hidden_shape[2] * 2
        self.encoder_layers = nn.ModuleList(encoder_layers)
        self.decoder_layers = nn.ModuleList(decoder_layers)
    def forward(self, t, y):
        h = y
        for layer in self.encoder_layers:
            h = self.nonlinearity(layer(t, h))
        dx = h
        for i, layer in enumerate(self.decoder_layers):
            dx = layer(t, dx)
            # if not last layer, use nonlinearity
            if i < len(self.decoder_layers) - 1:
                dx = self.nonlinearity(dx)
        return h, dx
class ODEfunc(nn.Module):
    """Augmented ODE dynamics for a CNF: returns (dy/dt, -divergence, ...).

    The divergence is either exact (brute force) or estimated with a
    Hutchinson trace estimator whose noise is fixed per trajectory.
    """
    def __init__(self, diffeq, divergence_fn="approximate", residual=False, rademacher=False):
        super(ODEfunc, self).__init__()
        assert divergence_fn in ("brute_force", "approximate")
        # self.diffeq = diffeq_layers.wrappers.diffeq_wrapper(diffeq)
        self.diffeq = diffeq
        self.residual = residual
        self.rademacher = rademacher
        if divergence_fn == "brute_force":
            self.divergence_fn = divergence_bf
        elif divergence_fn == "approximate":
            self.divergence_fn = divergence_approx
        self.register_buffer("_num_evals", torch.tensor(0.))
    def before_odeint(self, e=None):
        # Reset the fixed noise sample and the function-evaluation counter
        # before each solver call.
        self._e = e
        self._num_evals.fill_(0)
    def num_evals(self):
        return self._num_evals.item()
    def forward(self, t, states):
        assert len(states) >= 2
        y = states[0]
        # increment num evals
        self._num_evals += 1
        # convert to tensor
        t = torch.tensor(t).type_as(y)
        batchsize = y.shape[0]
        # Sample and fix the noise.
        if self._e is None:
            if self.rademacher:
                self._e = sample_rademacher_like(y)
            else:
                self._e = sample_gaussian_like(y)
        with torch.set_grad_enabled(True):
            y.requires_grad_(True)
            t.requires_grad_(True)
            for s_ in states[2:]:
                s_.requires_grad_(True)
            dy = self.diffeq(t, y, *states[2:])
            # Hack for 2D data to use brute force divergence computation.
            if not self.training and dy.view(dy.shape[0], -1).shape[1] == 2:
                divergence = divergence_bf(dy, y).view(batchsize, 1)
            else:
                divergence = self.divergence_fn(dy, y, e=self._e).view(batchsize, 1)
        if self.residual:
            # Residual dynamics dy - y shift the divergence by -dim(y).
            dy = dy - y
            divergence -= torch.ones_like(divergence) * torch.tensor(np.prod(y.shape[1:]), dtype=torch.float32
                                                                     ).to(divergence)
        return tuple([dy, -divergence] + [torch.zeros_like(s_).requires_grad_(True) for s_ in states[2:]])
class AutoencoderODEfunc(nn.Module):
    """ODE dynamics for an encoder/decoder diffeq net; the divergence is
    estimated with Hutchinson noise chained through the bottleneck h."""

    def __init__(self, autoencoder_diffeq, divergence_fn="approximate", residual=False, rademacher=False):
        """
        Args:
            autoencoder_diffeq: an AutoencoderDiffEqNet producing (h, dy).
            divergence_fn: only "approximate" is supported.
            residual: if True, use dy - y and shift the divergence by -dim(y).
            rademacher: use Rademacher instead of Gaussian Hutchinson noise.
        """
        # BUG FIX: ("approximate") is a plain string, so the original assert
        # performed a substring check (e.g. divergence_fn="prox" passed). A
        # one-element tuple makes it a proper membership test.
        assert divergence_fn in ("approximate",), "Only approximate divergence supported at the moment. (TODO)"
        assert isinstance(autoencoder_diffeq, AutoencoderDiffEqNet)
        super(AutoencoderODEfunc, self).__init__()
        self.residual = residual
        self.autoencoder_diffeq = autoencoder_diffeq
        self.rademacher = rademacher
        self.register_buffer("_num_evals", torch.tensor(0.))

    def before_odeint(self, e=None):
        # Reset the fixed noise sample and the evaluation counter.
        self._e = e
        self._num_evals.fill_(0)

    def forward(self, t, y_and_logpy):
        y, _ = y_and_logpy  # remove logpy
        # increment num evals
        self._num_evals += 1
        # convert to tensor
        t = torch.tensor(t).type_as(y)
        batchsize = y.shape[0]
        with torch.set_grad_enabled(True):
            y.requires_grad_(True)
            t.requires_grad_(True)
            h, dy = self.autoencoder_diffeq(t, y)
            # Sample and fix the noise (shaped like the bottleneck h,
            # reused for every evaluation along the trajectory).
            if self._e is None:
                if self.rademacher:
                    self._e = sample_rademacher_like(h)
                else:
                    self._e = sample_gaussian_like(h)
            # Chain vector-Jacobian products through encoder then decoder to
            # form the Hutchinson divergence estimate.
            e_vjp_dhdy = torch.autograd.grad(h, y, self._e, create_graph=True)[0]
            e_vjp_dfdy = torch.autograd.grad(dy, h, e_vjp_dhdy, create_graph=True)[0]
            divergence = torch.sum((e_vjp_dfdy * self._e).view(batchsize, -1), 1, keepdim=True)
        if self.residual:
            dy = dy - y
            divergence -= torch.ones_like(divergence) * torch.tensor(np.prod(y.shape[1:]), dtype=torch.float32
                                                                     ).to(divergence)
        return dy, -divergence
| 12,985 | 34.675824 | 114 | py |
steer | steer-master/ffjord/lib/layers/resnet.py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-channel-preserving residual block: two 3x3 convs with GroupNorm
    (2 groups) and ReLU, plus an identity skip connection."""
    expansion = 1
    def __init__(self, dim):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.GroupNorm(2, dim, eps=1e-4)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.GroupNorm(2, dim, eps=1e-4)
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out
class ResNeXtBottleneck(nn.Module):
    """
    RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
    """
    def __init__(self, dim, cardinality=4, base_depth=32):
        """ Constructor
        Args:
            dim: input/output channel dimensionality (preserved by the block).
            cardinality: num of convolution groups.
            base_depth: channels per group; the grouped conv runs at
                D = cardinality * base_depth channels.
        """
        super(ResNeXtBottleneck, self).__init__()
        D = cardinality * base_depth
        # 1x1 reduce -> grouped 3x3 -> 1x1 expand back to `dim`.
        self.conv_reduce = nn.Conv2d(dim, D, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_reduce = nn.BatchNorm2d(D)
        self.conv_grp = nn.Conv2d(D, D, kernel_size=3, stride=1, padding=1, groups=cardinality, bias=False)
        self.bn = nn.BatchNorm2d(D)
        self.conv_expand = nn.Conv2d(D, dim, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_expand = nn.BatchNorm2d(dim)
    def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
        bottleneck = self.conv_grp.forward(bottleneck)
        bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
        bottleneck = self.conv_expand.forward(bottleneck)
        bottleneck = self.bn_expand.forward(bottleneck)
        # Identity skip connection around the bottleneck.
        return F.relu(x + bottleneck, inplace=True)
| 2,335 | 35.5 | 107 | py |
steer | steer-master/ffjord/lib/layers/glow.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class BruteForceLayer(nn.Module):
    """Invertible dense flow layer y = W x with log|det W| computed directly.

    Initialized at the identity. The inverse and the determinant are computed
    in double precision for numerical stability, then cast back to float.
    """
    def __init__(self, dim):
        super(BruteForceLayer, self).__init__()
        self.weight = nn.Parameter(torch.eye(dim))
    def forward(self, x, logpx=None, reverse=False):
        if not reverse:
            y = F.linear(x, self.weight)
            if logpx is None:
                return y
            else:
                return y, logpx - self._logdetgrad.expand_as(logpx)
        else:
            # Inverse direction: apply W^{-1}; log-det enters with opposite sign.
            y = F.linear(x, self.weight.double().inverse().float())
            if logpx is None:
                return y
            else:
                return y, logpx + self._logdetgrad.expand_as(logpx)
    @property
    def _logdetgrad(self):
        # log |det W|, evaluated in float64 then cast back.
        return torch.log(torch.abs(torch.det(self.weight.double()))).float()
| 836 | 26 | 76 | py |
steer | steer-master/ffjord/lib/layers/elemwise.py | import math
import torch
import torch.nn as nn
_DEFAULT_ALPHA = 1e-6
class ZeroMeanTransform(nn.Module):
    """Fixed shift by 0.5: forward maps [0, 1] data onto [-0.5, 0.5]; reverse
    undoes it. A pure translation, so the log-density is unchanged."""

    def __init__(self):
        nn.Module.__init__(self)

    def forward(self, x, logpx=None, reverse=False):
        y = x + .5 if reverse else x - .5
        if logpx is None:
            return y
        # Unit Jacobian: logpx passes through untouched.
        return y, logpx
class LogitTransform(nn.Module):
    """
    The preprocessing step used in Real NVP:
    y = (sigmoid(x) - a) / (1 - 2a)
    x = logit(a + (1 - 2a)*y)
    Forward applies the logit direction; reverse applies the sigmoid direction.
    """
    def __init__(self, alpha=_DEFAULT_ALPHA):
        nn.Module.__init__(self)
        self.alpha = alpha
    def forward(self, x, logpx=None, reverse=False):
        if reverse:
            return _sigmoid(x, logpx, self.alpha)
        else:
            return _logit(x, logpx, self.alpha)
class SigmoidTransform(nn.Module):
    """Reverse of LogitTransform: forward applies the (rescaled) sigmoid,
    reverse applies the logit."""
    def __init__(self, alpha=_DEFAULT_ALPHA):
        nn.Module.__init__(self)
        self.alpha = alpha
    def forward(self, x, logpx=None, reverse=False):
        if reverse:
            return _logit(x, logpx, self.alpha)
        else:
            return _sigmoid(x, logpx, self.alpha)
def _logit(x, logpx=None, alpha=_DEFAULT_ALPHA):
    """Map (0, 1) data to R via logit of the alpha-rescaled input; when logpx
    is given, also return it adjusted by the change-of-variables term."""
    s = alpha + (1 - 2 * alpha) * x
    y = torch.log(s) - torch.log(1 - s)
    if logpx is None:
        return y
    dlogp = _logdetgrad(x, alpha).view(x.size(0), -1).sum(1, keepdim=True)
    return y, logpx - dlogp
def _sigmoid(y, logpy=None, alpha=_DEFAULT_ALPHA):
    """Inverse of _logit: map R back to (0, 1) via the alpha-rescaled sigmoid;
    when logpy is given, also return it with the log-det correction added."""
    x = (torch.sigmoid(y) - alpha) / (1 - 2 * alpha)
    if logpy is None:
        return x
    dlogp = _logdetgrad(x, alpha).view(x.size(0), -1).sum(1, keepdim=True)
    return x, logpy + dlogp
def _logdetgrad(x, alpha):
s = alpha + (1 - 2 * alpha) * x
logdetgrad = -torch.log(s - s * s) + math.log(1 - 2 * alpha)
return logdetgrad
| 1,918 | 24.25 | 84 | py |
steer | steer-master/ffjord/lib/layers/__init__.py | from .elemwise import *
from .container import *
from .cnf import *
from .odefunc import *
from .squeeze import *
from .normalization import *
from . import diffeq_layers
from .coupling import *
from .glow import *
from .norm_flows import *
| 241 | 21 | 28 | py |
steer | steer-master/ffjord/lib/layers/normalization.py | import torch
import torch.nn as nn
from torch.nn import Parameter
__all__ = ['MovingBatchNorm1d', 'MovingBatchNorm2d']
class MovingBatchNormNd(nn.Module):
    """Invertible batch-norm flow layer with moving-average statistics.

    Forward normalizes with (lagged) batch statistics during training and the
    running estimates otherwise; reverse always uses the running estimates.
    When `affine`, a per-feature exp(weight) scale and bias are applied, and
    both directions report the log-det-Jacobian correction for logp.
    """
    def __init__(self, num_features, eps=1e-4, decay=0.1, bn_lag=0., affine=True):
        super(MovingBatchNormNd, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.decay = decay
        self.bn_lag = bn_lag
        self.register_buffer('step', torch.zeros(1))
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()
    @property
    def shape(self):
        # Broadcast shape for per-feature stats; provided by 1d/2d subclasses.
        raise NotImplementedError
    def reset_parameters(self):
        self.running_mean.zero_()
        self.running_var.fill_(1)
        if self.affine:
            # weight starts at 0 so exp(weight) = 1 (identity scale).
            self.weight.data.zero_()
            self.bias.data.zero_()
    def forward(self, x, logpx=None, reverse=False):
        if reverse:
            return self._reverse(x, logpx)
        else:
            return self._forward(x, logpx)
    def _forward(self, x, logpx=None):
        c = x.size(1)
        used_mean = self.running_mean.clone().detach()
        used_var = self.running_var.clone().detach()
        if self.training:
            # compute batch statistics
            x_t = x.transpose(0, 1).contiguous().view(c, -1)
            batch_mean = torch.mean(x_t, dim=1)
            batch_var = torch.var(x_t, dim=1)
            # moving average (debiased, as in Adam-style bias correction)
            if self.bn_lag > 0:
                used_mean = batch_mean - (1 - self.bn_lag) * (batch_mean - used_mean.detach())
                used_mean /= (1. - self.bn_lag**(self.step[0] + 1))
                used_var = batch_var - (1 - self.bn_lag) * (batch_var - used_var.detach())
                used_var /= (1. - self.bn_lag**(self.step[0] + 1))
            # update running estimates
            self.running_mean -= self.decay * (self.running_mean - batch_mean.data)
            self.running_var -= self.decay * (self.running_var - batch_var.data)
            self.step += 1
        # perform normalization
        used_mean = used_mean.view(*self.shape).expand_as(x)
        used_var = used_var.view(*self.shape).expand_as(x)
        y = (x - used_mean) * torch.exp(-0.5 * torch.log(used_var + self.eps))
        if self.affine:
            weight = self.weight.view(*self.shape).expand_as(x)
            bias = self.bias.view(*self.shape).expand_as(x)
            y = y * torch.exp(weight) + bias
        if logpx is None:
            return y
        else:
            return y, logpx - self._logdetgrad(x, used_var).view(x.size(0), -1).sum(1, keepdim=True)
    def _reverse(self, y, logpy=None):
        used_mean = self.running_mean
        used_var = self.running_var
        if self.affine:
            # Undo the affine part first, then denormalize.
            weight = self.weight.view(*self.shape).expand_as(y)
            bias = self.bias.view(*self.shape).expand_as(y)
            y = (y - bias) * torch.exp(-weight)
        used_mean = used_mean.view(*self.shape).expand_as(y)
        used_var = used_var.view(*self.shape).expand_as(y)
        x = y * torch.exp(0.5 * torch.log(used_var + self.eps)) + used_mean
        if logpy is None:
            return x
        else:
            return x, logpy + self._logdetgrad(x, used_var).view(x.size(0), -1).sum(1, keepdim=True)
    def _logdetgrad(self, x, used_var):
        # Per-element log-derivative of the forward map: -0.5*log(var+eps) (+ weight).
        logdetgrad = -0.5 * torch.log(used_var + self.eps)
        if self.affine:
            weight = self.weight.view(*self.shape).expand(*x.size())
            logdetgrad += weight
        return logdetgrad
    def __repr__(self):
        return (
            '{name}({num_features}, eps={eps}, decay={decay}, bn_lag={bn_lag},'
            ' affine={affine})'.format(name=self.__class__.__name__, **self.__dict__)
        )
def stable_var(x, mean=None, dim=1):
    """Numerically stable (biased) variance of each row of x, computed by
    factoring out the largest squared deviation so intermediate ratios stay
    in [0, 1]. All-constant rows yield 0 instead of NaN."""
    if mean is None:
        mean = x.mean(dim, keepdim=True)
    mean = mean.view(-1, 1)
    sq_dev = (x - mean).pow(2)
    peak = sq_dev.max(dim, keepdim=True)[0]
    var = (sq_dev / peak).mean(1, keepdim=True) * peak
    var = var.view(-1)
    # Zero-spread rows produce 0/0 = NaN above; map those to 0.
    var[var != var] = 0
    return var
class MovingBatchNorm1d(MovingBatchNormNd):
    """Moving batch norm over (batch, features) inputs."""
    @property
    def shape(self):
        return [1, -1]
class MovingBatchNorm2d(MovingBatchNormNd):
    """Moving batch norm over (batch, channels, height, width) inputs."""
    @property
    def shape(self):
        return [1, -1, 1, 1]
| 4,688 | 32.978261 | 100 | py |
steer | steer-master/ffjord/lib/layers/coupling.py | import torch
import torch.nn as nn
__all__ = ['CouplingLayer', 'MaskedCouplingLayer']
class CouplingLayer(nn.Module):
    """Affine coupling layer (used in 2D experiments).

    The first self.d dims condition a scale/shift applied to the remaining
    dims; `swap` exchanges which half is transformed. Invertible with a
    tractable log-det-Jacobian (sum of log scales).
    """
    def __init__(self, d, intermediate_dim=64, swap=False):
        nn.Module.__init__(self)
        self.d = d - (d // 2)
        self.swap = swap
        # Single net emits both scale and shift for the transformed half.
        self.net_s_t = nn.Sequential(
            nn.Linear(self.d, intermediate_dim),
            nn.ReLU(inplace=True),
            nn.Linear(intermediate_dim, intermediate_dim),
            nn.ReLU(inplace=True),
            nn.Linear(intermediate_dim, (d - self.d) * 2),
        )
    def forward(self, x, logpx=None, reverse=False):
        if self.swap:
            x = torch.cat([x[:, self.d:], x[:, :self.d]], 1)
        in_dim = self.d
        out_dim = x.shape[1] - self.d
        s_t = self.net_s_t(x[:, :in_dim])
        # +2 biases the sigmoid toward 1 so the layer starts near identity scale.
        scale = torch.sigmoid(s_t[:, :out_dim] + 2.)
        shift = s_t[:, out_dim:]
        logdetjac = torch.sum(torch.log(scale).view(scale.shape[0], -1), 1, keepdim=True)
        if not reverse:
            y1 = x[:, self.d:] * scale + shift
            delta_logp = -logdetjac
        else:
            y1 = (x[:, self.d:] - shift) / scale
            delta_logp = logdetjac
        y = torch.cat([x[:, :self.d], y1], 1) if not self.swap else torch.cat([y1, x[:, :self.d]], 1)
        if logpx is None:
            return y
        else:
            return y, logpx + delta_logp
class MaskedCouplingLayer(nn.Module):
    """Masked affine coupling layer (used in the tabular experiments).

    Masked-in dims (mask==1) pass through unchanged and condition the
    exp-scale/shift applied to the masked-out dims.
    """
    def __init__(self, d, hidden_dims, mask_type='alternate', swap=False):
        nn.Module.__init__(self)
        self.d = d
        self.register_buffer('mask', sample_mask(d, mask_type, swap).view(1, d))
        self.net_scale = build_net(d, hidden_dims, activation="tanh")
        self.net_shift = build_net(d, hidden_dims, activation="relu")
    def forward(self, x, logpx=None, reverse=False):
        scale = torch.exp(self.net_scale(x * self.mask))
        shift = self.net_shift(x * self.mask)
        # Identity on masked-in entries: scale 1, shift 0 there.
        masked_scale = scale * (1 - self.mask) + torch.ones_like(scale) * self.mask
        masked_shift = shift * (1 - self.mask)
        logdetjac = torch.sum(torch.log(masked_scale).view(scale.shape[0], -1), 1, keepdim=True)
        if not reverse:
            y = x * masked_scale + masked_shift
            delta_logp = -logdetjac
        else:
            y = (x - masked_shift) / masked_scale
            delta_logp = logdetjac
        if logpx is None:
            return y
        else:
            return y, logpx + delta_logp
def sample_mask(dim, mask_type, swap):
    """Build a binary mask of length `dim` for coupling layers.

    'alternate': every other entry is 1 (index-based masking, MAF paper);
    'channel': the first half is 1 (Real NVP paper). `swap` inverts the mask.
    Raises ValueError for any other mask_type.
    """
    if mask_type == 'alternate':
        mask = torch.zeros(dim)
        mask[::2] = 1
    elif mask_type == 'channel':
        mask = torch.zeros(dim)
        mask[:dim // 2] = 1
    else:
        raise ValueError('Unknown mask_type {}'.format(mask_type))
    return (1 - mask) if swap else mask
def build_net(input_dim, hidden_dims, activation="relu"):
    """Stack Linear layers of widths input_dim -> *hidden_dims -> input_dim,
    inserting the chosen activation between consecutive Linear layers
    (no activation after the final layer)."""
    act = {"relu": nn.ReLU(inplace=True), "tanh": nn.Tanh()}[activation]
    widths = (input_dim,) + tuple(hidden_dims) + (input_dim,)
    modules = []
    for idx in range(len(widths) - 1):
        modules.append(nn.Linear(widths[idx], widths[idx + 1]))
        if idx < len(hidden_dims):
            # Note: the same activation module instance is reused (stateless).
            modules.append(act)
    return nn.Sequential(*modules)
| 3,525 | 30.20354 | 101 | py |
steer | steer-master/ffjord/lib/layers/wrappers/cnf_regularization.py | import torch
import torch.nn as nn
class RegularizedODEfunc(nn.Module):
    """Wraps an ODE function so that regularization quantities are integrated
    alongside the flow state; CNF reads them back after the solve."""
    def __init__(self, odefunc, regularization_fns):
        super(RegularizedODEfunc, self).__init__()
        self.odefunc = odefunc
        self.regularization_fns = regularization_fns
    def before_odeint(self, *args, **kwargs):
        self.odefunc.before_odeint(*args, **kwargs)
    def forward(self, t, state):
        # Fresh per-call scratch object so regularizers can share expensive
        # intermediates (e.g. the Jacobian) within a single evaluation.
        class SharedContext(object):
            pass
        with torch.enable_grad():
            x, logp = state[:2]
            x.requires_grad_(True)
            logp.requires_grad_(True)
            dstate = self.odefunc(t, (x, logp))
            if len(state) > 2:
                dx, dlogp = dstate[:2]
                reg_states = tuple(reg_fn(x, logp, dx, dlogp, SharedContext) for reg_fn in self.regularization_fns)
                return dstate + reg_states
            else:
                return dstate
    @property
    def _num_evals(self):
        # Delegate the evaluation counter to the wrapped function.
        return self.odefunc._num_evals
def _batch_root_mean_squared(tensor):
tensor = tensor.view(tensor.shape[0], -1)
return torch.mean(torch.norm(tensor, p=2, dim=1) / tensor.shape[1]**0.5)
def l1_regularzation_fn(x, logp, dx, dlogp, unused_context):
    """Mean absolute value of dx (an L1 penalty on the dynamics output).

    Note: the misspelling "regularzation" is kept to preserve the public name.
    """
    del x, logp, dlogp
    return dx.abs().mean()
def l2_regularzation_fn(x, logp, dx, dlogp, unused_context):
    """Batch-averaged RMS of dx (an L2 penalty on the dynamics output).

    Note: the misspelling "regularzation" is kept to preserve the public name.
    """
    del x, logp, dlogp
    return _batch_root_mean_squared(dx)
def directional_l2_regularization_fn(x, logp, dx, dlogp, unused_context):
    """Batch-averaged RMS of the vector-Jacobian product dx^T (d(dx)/dx)."""
    del logp, dlogp
    # autograd.grad with grad_outputs=dx computes the VJP of dx wrt x;
    # create_graph=True keeps the penalty itself differentiable.
    directional_dx = torch.autograd.grad(dx, x, dx, create_graph=True)[0]
    return _batch_root_mean_squared(directional_dx)
def jacobian_frobenius_regularization_fn(x, logp, dx, dlogp, context):
    """Batch-averaged RMS (Frobenius-style) of the Jacobian d(dx)/dx.

    The Jacobian is cached on `context` so sibling regularizers reuse it.
    """
    del logp, dlogp
    if hasattr(context, "jac"):
        jac = context.jac
    else:
        jac = _get_minibatch_jacobian(dx, x)
        context.jac = jac
    return _batch_root_mean_squared(jac)
def jacobian_diag_frobenius_regularization_fn(x, logp, dx, dlogp, context):
    """Batch-averaged RMS of the diagonal of the minibatch Jacobian d(dx)/dx.

    Assumes the Jacobian is minibatch square, i.e. (N, M, M). The Jacobian
    is cached on `context` so sibling regularizers reuse it.
    """
    del logp, dlogp
    if hasattr(context, "jac"):
        jac = context.jac
    else:
        jac = _get_minibatch_jacobian(dx, x)
        context.jac = jac
    # Bug fix: in the flattened (N, M*M) view, the diagonal entries (k, k)
    # sit at flat index k*(M+1), i.e. stride M + 1. The original used a
    # stride of M (jac.shape[1]), which selects the first *column* of the
    # Jacobian instead of its diagonal.
    diagonal = jac.view(jac.shape[0], -1)[:, ::jac.shape[1] + 1]
    return _batch_root_mean_squared(diagonal)
def jacobian_offdiag_frobenius_regularization_fn(x, logp, dx, dlogp, context):
    """Mean squared off-diagonal entry of the minibatch Jacobian d(dx)/dx.

    Assumes the Jacobian is minibatch square, i.e. (N, M, M). The Jacobian
    is cached on `context` so sibling regularizers reuse it.
    """
    del logp, dlogp
    if hasattr(context, "jac"):
        jac = context.jac
    else:
        jac = _get_minibatch_jacobian(dx, x)
        context.jac = jac
    # Bug fix: the diagonal entries (k, k) of the flattened (N, M*M) view sit
    # at stride M + 1; the original stride of M picked the first column.
    diagonal = jac.view(jac.shape[0], -1)[:, ::jac.shape[1] + 1]
    # Total sum of squares minus the diagonal's leaves the off-diagonal part;
    # normalize by the M * (M - 1) off-diagonal entries.
    ss_offdiag = torch.sum(jac.view(jac.shape[0], -1)**2, dim=1) - torch.sum(diagonal**2, dim=1)
    ms_offdiag = ss_offdiag / (diagonal.shape[1] * (diagonal.shape[1] - 1))
    return torch.mean(ms_offdiag)
def _get_minibatch_jacobian(y, x, create_graph=False):
    """Computes the Jacobian of y wrt x assuming minibatch-mode.

    Args:
      y: (N, ...) with a total of D_y elements in ...
      x: (N, ...) with a total of D_x elements in ...
    Returns:
      The minibatch Jacobian matrix of shape (N, D_y, D_x)

    NOTE(review): the `create_graph` parameter is currently ignored -- the
    autograd call below always passes create_graph=True (which keeps the
    regularizers differentiable). Confirm callers before honoring the flag.
    """
    assert y.shape[0] == x.shape[0]
    y = y.view(y.shape[0], -1)

    # Compute Jacobian row by row: one backward pass per output dimension.
    jac = []
    for j in range(y.shape[1]):
        dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,
                                      create_graph=True)[0].view(x.shape[0], -1)
        jac.append(torch.unsqueeze(dy_j_dx, 1))
    jac = torch.cat(jac, 1)
    return jac
| 3,591 | 31.654545 | 115 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/container.py | import torch
import torch.nn as nn
from .wrappers import diffeq_wrapper
class SequentialDiffEq(nn.Module):
    """A container for a sequential chain of layers. Supports both regular and diffeq layers.
    """

    def __init__(self, *layers):
        super(SequentialDiffEq, self).__init__()
        self.layers = nn.ModuleList([diffeq_wrapper(layer) for layer in layers])

    def forward(self, t, x):
        out = x
        for transform in self.layers:
            out = transform(t, out)
        return out
class MixtureODELayer(nn.Module):
    """Produces a mixture of experts where output = sigma(t) * f(t, x).

    Time-dependent weights sigma(t) help learn to blend the experts without
    resorting to a highly stiff f. Supports both regular and diffeq experts.
    """

    def __init__(self, experts):
        super(MixtureODELayer, self).__init__()
        assert len(experts) > 1
        self.experts = nn.ModuleList([diffeq_wrapper(ex) for ex in experts])
        self.mixture_weights = nn.Linear(1, len(self.experts))

    def forward(self, t, y):
        # Stack every expert's output along a new leading dimension.
        expert_outputs = torch.stack([expert(t, y) for expert in self.experts], 0)
        # Reshape the per-expert weights so they broadcast over the outputs.
        broadcast_shape = (-1,) + (1,) * (expert_outputs.ndimension() - 1)
        weights = self.mixture_weights(t).view(*broadcast_shape)
        return torch.sum(expert_outputs * weights, dim=0, keepdim=False)
| 1,357 | 30.581395 | 106 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/resnet.py | import torch.nn as nn
from . import basic
from . import container
NGROUPS = 16
class ResNet(container.SequentialDiffEq):
    """Time-dependent conv ResNet: an input conv, `n_resblocks` residual
    blocks, then GroupNorm/ReLU and a 1x1 output conv, chained as a
    SequentialDiffEq."""

    def __init__(self, dim, intermediate_dim, n_resblocks, conv_block=None):
        if conv_block is None:
            conv_block = basic.ConcatCoordConv2d

        layers = [conv_block(dim, intermediate_dim, ksize=3, stride=1, padding=1, bias=False)]
        layers.extend(BasicBlock(intermediate_dim, conv_block) for _ in range(n_resblocks))
        layers.append(nn.GroupNorm(NGROUPS, intermediate_dim, eps=1e-4))
        layers.append(nn.ReLU(inplace=True))
        layers.append(conv_block(intermediate_dim, dim, ksize=1, bias=False))
        # Single parent-init call (the original called it twice).
        super(ResNet, self).__init__(*layers)

        self.dim = dim
        self.intermediate_dim = intermediate_dim
        self.n_resblocks = n_resblocks

    def __repr__(self):
        return (
            '{name}({dim}, intermediate_dim={intermediate_dim}, n_resblocks={n_resblocks})'.format(
                name=self.__class__.__name__, **self.__dict__
            )
        )
class BasicBlock(nn.Module):
    """Pre-activation residual block (norm -> ReLU -> conv, twice) whose
    convolutions take the time argument t."""

    expansion = 1

    def __init__(self, dim, conv_block=None):
        super(BasicBlock, self).__init__()
        if conv_block is None:
            conv_block = basic.ConcatCoordConv2d

        self.norm1 = nn.GroupNorm(NGROUPS, dim, eps=1e-4)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = conv_block(dim, dim, ksize=3, stride=1, padding=1, bias=False)
        self.norm2 = nn.GroupNorm(NGROUPS, dim, eps=1e-4)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = conv_block(dim, dim, ksize=3, stride=1, padding=1, bias=False)

    def forward(self, t, x):
        h = self.conv1(t, self.relu1(self.norm1(x)))
        h = self.conv2(t, self.relu2(self.norm2(h)))
        return x + h
| 2,003 | 28.470588 | 99 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/wrappers.py | from inspect import signature
import torch.nn as nn
__all__ = ["diffeq_wrapper", "reshape_wrapper"]
class DiffEqWrapper(nn.Module):
    """Adapts a module so it can always be called as a diffeq f(t, y).

    Modules whose forward takes a single argument are wrapped to ignore t;
    modules already taking (t, y) are used directly.
    """

    def __init__(self, module):
        super(DiffEqWrapper, self).__init__()
        self.module = module
        n_params = len(signature(self.module.forward).parameters)
        if n_params == 1:
            self.diffeq = lambda t, y: self.module(y)
        elif n_params == 2:
            self.diffeq = self.module
        else:
            raise ValueError("Differential equation needs to either take (t, y) or (y,) as input.")

    def forward(self, t, y):
        return self.diffeq(t, y)

    def __repr__(self):
        return self.diffeq.__repr__()
def diffeq_wrapper(layer):
    """Wrap `layer` so it can be called as f(t, y) (see DiffEqWrapper)."""
    return DiffEqWrapper(layer)
class ReshapeDiffEq(nn.Module):
    """Views flat input as `input_shape` before calling `net(t, x)`, then
    flattens the result back to (batch, -1).

    Args:
        input_shape: per-sample shape the flat input is viewed as.
        net: a diffeq-style module taking (t, x); apply diffeq_wrapper first
            if the module only takes x.
    """

    def __init__(self, input_shape, net):
        super(ReshapeDiffEq, self).__init__()
        assert len(signature(net.forward).parameters) == 2, "use diffeq_wrapper before reshape_wrapper."
        self.input_shape = input_shape
        self.net = net

    def forward(self, t, x):
        batchsize = x.shape[0]
        x = x.view(batchsize, *self.input_shape)
        return self.net(t, x).view(batchsize, -1)

    def __repr__(self):
        # Bug fix: the wrapped module lives in `self.net`; the original
        # referenced `self.diffeq`, which does not exist on this class, so
        # repr() raised AttributeError.
        return self.net.__repr__()
def reshape_wrapper(input_shape, layer):
    """Wrap `layer` so flat inputs are viewed as `input_shape` before the call."""
    return ReshapeDiffEq(input_shape, layer)
| 1,365 | 28.06383 | 104 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/__init__.py | from .container import *
from .resnet import *
from .basic import *
from .wrappers import *
| 92 | 17.6 | 24 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/basic.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init(m):
    """Init hook: zero the weight and N(0, 0.01^...)-init the bias of any
    module whose class name contains 'Linear' or 'Conv'."""
    classname = m.__class__.__name__
    if 'Linear' in classname or 'Conv' in classname:
        nn.init.constant_(m.weight, 0)
        nn.init.normal_(m.bias, 0, 0.01)
class HyperLinear(nn.Module):
    """Linear layer whose weight and bias are produced by a small
    hypernetwork of the scalar time t."""

    def __init__(self, dim_in, dim_out, hypernet_dim=8, n_hidden=1, activation=nn.Tanh):
        super(HyperLinear, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        # Total parameters emitted by the hypernet: weight matrix + bias.
        self.params_dim = self.dim_in * self.dim_out + self.dim_out

        dims = [1] + [hypernet_dim] * n_hidden + [self.params_dim]
        layers = []
        for in_d, out_d in zip(dims[:-1], dims[1:]):
            layers.append(nn.Linear(in_d, out_d))
            layers.append(activation())
        layers.pop()  # no activation after the final projection
        self._hypernet = nn.Sequential(*layers)
        self._hypernet.apply(weights_init)

    def forward(self, t, x):
        params = self._hypernet(t.view(1, 1)).view(-1)
        bias = params[:self.dim_out].view(self.dim_out)
        weight = params[self.dim_out:].view(self.dim_out, self.dim_in)
        return F.linear(x, weight, bias)
class IgnoreLinear(nn.Module):
    """Plain linear layer that ignores the time argument t."""

    def __init__(self, dim_in, dim_out):
        super(IgnoreLinear, self).__init__()
        self._layer = nn.Linear(dim_in, dim_out)

    def forward(self, t, x):
        # t is intentionally unused.
        return self._layer(x)
class ConcatLinear(nn.Module):
    """Linear layer over [t, x]: t is broadcast and concatenated as an
    extra input feature."""

    def __init__(self, dim_in, dim_out):
        super(ConcatLinear, self).__init__()
        self._layer = nn.Linear(dim_in + 1, dim_out)

    def forward(self, t, x):
        t_col = x.new_ones(x.shape[0], 1) * t
        return self._layer(torch.cat([t_col, x], dim=1))
class ConcatLinear_v2(nn.Module):
    """Linear layer plus a t-dependent bias: W x + b + b_hyper(t)."""

    def __init__(self, dim_in, dim_out):
        # Bug fix: the original called super(ConcatLinear, self).__init__(),
        # which raises TypeError because ConcatLinear_v2 is not a subclass of
        # ConcatLinear -- the class could never be instantiated.
        super(ConcatLinear_v2, self).__init__()
        self._layer = nn.Linear(dim_in, dim_out)
        self._hyper_bias = nn.Linear(1, dim_out, bias=False)

    def forward(self, t, x):
        return self._layer(x) + self._hyper_bias(t.view(1, 1))
class SquashLinear(nn.Module):
    """Linear layer gated elementwise by sigmoid of a linear function of t."""

    def __init__(self, dim_in, dim_out):
        super(SquashLinear, self).__init__()
        self._layer = nn.Linear(dim_in, dim_out)
        self._hyper = nn.Linear(1, dim_out)

    def forward(self, t, x):
        gate = torch.sigmoid(self._hyper(t.view(1, 1)))
        return gate * self._layer(x)
class ConcatSquashLinear(nn.Module):
    """Linear layer with a t-dependent sigmoid gate and a t-dependent bias."""

    def __init__(self, dim_in, dim_out):
        super(ConcatSquashLinear, self).__init__()
        self._layer = nn.Linear(dim_in, dim_out)
        self._hyper_bias = nn.Linear(1, dim_out, bias=False)
        self._hyper_gate = nn.Linear(1, dim_out)

    def forward(self, t, x):
        tt = t.view(1, 1)
        gate = torch.sigmoid(self._hyper_gate(tt))
        return self._layer(x) * gate + self._hyper_bias(tt)
class HyperConv2d(nn.Module):
    """Conv2d whose weight and bias are generated from t by a hypernetwork."""

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(HyperConv2d, self).__init__()
        assert dim_in % groups == 0 and dim_out % groups == 0, "dim_in and dim_out must both be divisible by groups."
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.ksize = ksize
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.transpose = transpose

        # Weight parameters, plus dim_out bias parameters when bias is enabled.
        self.params_dim = int(dim_in * dim_out * ksize * ksize / groups)
        if self.bias:
            self.params_dim += dim_out
        self._hypernet = nn.Linear(1, self.params_dim)
        self.conv_fn = F.conv_transpose2d if transpose else F.conv2d

        self._hypernet.apply(weights_init)

    def forward(self, t, x):
        params = self._hypernet(t.view(1, 1)).view(-1)
        weight_size = int(self.dim_in * self.dim_out * self.ksize * self.ksize / self.groups)
        if self.transpose:
            weight = params[:weight_size].view(self.dim_in, self.dim_out // self.groups, self.ksize, self.ksize)
        else:
            weight = params[:weight_size].view(self.dim_out, self.dim_in // self.groups, self.ksize, self.ksize)
        # Bug fix: the bias must come from the parameters *after* the weight
        # block. The original sliced params[:self.dim_out], reusing the first
        # dim_out weight entries and leaving the dedicated bias entries unused.
        bias = params[weight_size:weight_size + self.dim_out].view(self.dim_out) if self.bias else None
        return self.conv_fn(
            x, weight=weight, bias=bias, stride=self.stride, padding=self.padding, groups=self.groups,
            dilation=self.dilation
        )
class IgnoreConv2d(nn.Module):
    """Convolution (or transposed convolution) that ignores the time argument t."""

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(IgnoreConv2d, self).__init__()
        conv_cls = nn.ConvTranspose2d if transpose else nn.Conv2d
        self._layer = conv_cls(
            dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )

    def forward(self, t, x):
        # t is intentionally unused.
        return self._layer(x)
class SquashConv2d(nn.Module):
    """Convolution gated per output channel by sigmoid of a linear function
    of t -- the convolutional analogue of SquashLinear."""

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(SquashConv2d, self).__init__()
        module = nn.ConvTranspose2d if transpose else nn.Conv2d
        # Bug fix: the conv must accept dim_in channels. The original used
        # dim_in + 1 (copied from ConcatConv2d, which concatenates t as an
        # extra channel), so forward() failed on any dim_in-channel input.
        self._layer = module(
            dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )
        self._hyper = nn.Linear(1, dim_out)

    def forward(self, t, x):
        return self._layer(x) * torch.sigmoid(self._hyper(t.view(1, 1))).view(1, -1, 1, 1)
class ConcatConv2d(nn.Module):
    """Convolution over [t, x] with t broadcast as an extra input channel."""

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(ConcatConv2d, self).__init__()
        conv_cls = nn.ConvTranspose2d if transpose else nn.Conv2d
        self._layer = conv_cls(
            dim_in + 1, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )

    def forward(self, t, x):
        t_channel = torch.ones_like(x[:, :1, :, :]) * t
        return self._layer(torch.cat([t_channel, x], dim=1))
class ConcatConv2d_v2(nn.Module):
    """Convolution plus a t-dependent per-channel bias."""

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        # Bug fix: the original called super(ConcatConv2d, self).__init__(),
        # which raises TypeError because ConcatConv2d_v2 does not subclass
        # ConcatConv2d -- the class could never be instantiated.
        super(ConcatConv2d_v2, self).__init__()
        module = nn.ConvTranspose2d if transpose else nn.Conv2d
        self._layer = module(
            dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )
        self._hyper_bias = nn.Linear(1, dim_out, bias=False)

    def forward(self, t, x):
        return self._layer(x) + self._hyper_bias(t.view(1, 1)).view(1, -1, 1, 1)
class ConcatSquashConv2d(nn.Module):
    """Convolution with a t-dependent per-channel sigmoid gate and bias."""

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(ConcatSquashConv2d, self).__init__()
        conv_cls = nn.ConvTranspose2d if transpose else nn.Conv2d
        self._layer = conv_cls(
            dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )
        self._hyper_gate = nn.Linear(1, dim_out)
        self._hyper_bias = nn.Linear(1, dim_out, bias=False)

    def forward(self, t, x):
        tt = t.view(1, 1)
        gate = torch.sigmoid(self._hyper_gate(tt)).view(1, -1, 1, 1)
        bias = self._hyper_bias(tt).view(1, -1, 1, 1)
        return self._layer(x) * gate + bias
class ConcatCoordConv2d(nn.Module):
    """Convolution over x augmented with three extra channels: broadcast t,
    the row index, and the column index of each pixel (CoordConv-style)."""

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(ConcatCoordConv2d, self).__init__()
        conv_cls = nn.ConvTranspose2d if transpose else nn.Conv2d
        self._layer = conv_cls(
            dim_in + 3, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )

    def forward(self, t, x):
        b, c, h, w = x.shape
        row_idx = torch.arange(h).to(x).view(1, 1, h, 1).expand(b, 1, h, w)
        col_idx = torch.arange(w).to(x).view(1, 1, 1, w).expand(b, 1, h, w)
        t_chan = t.to(x).view(1, 1, 1, 1).expand(b, 1, h, w)
        return self._layer(torch.cat([x, t_chan, row_idx, col_idx], dim=1))
class GatedLinear(nn.Module):
    """GLU-style linear layer: f(x) * sigmoid(g(x))."""

    def __init__(self, in_features, out_features):
        super(GatedLinear, self).__init__()
        self.layer_f = nn.Linear(in_features, out_features)
        self.layer_g = nn.Linear(in_features, out_features)

    def forward(self, x):
        return self.layer_f(x) * torch.sigmoid(self.layer_g(x))
class GatedConv(nn.Module):
    """GLU-style convolution: f(x) * sigmoid(g(x))."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1):
        super(GatedConv, self).__init__()
        self.layer_f = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=1, groups=groups
        )
        self.layer_g = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=1, groups=groups
        )

    def forward(self, x):
        return self.layer_f(x) * torch.sigmoid(self.layer_g(x))
class GatedConvTranspose(nn.Module):
    """GLU-style transposed convolution: f(x) * sigmoid(g(x))."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1):
        super(GatedConvTranspose, self).__init__()
        self.layer_f = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=output_padding,
            groups=groups
        )
        self.layer_g = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=output_padding,
            groups=groups
        )

    def forward(self, x):
        return self.layer_f(x) * torch.sigmoid(self.layer_g(x))
class BlendLinear(nn.Module):
    """Linearly interpolates two layers over time: (1 - t)*f0(x) + t*f1(x)."""

    def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs):
        super(BlendLinear, self).__init__()
        self._layer0 = layer_type(dim_in, dim_out)
        self._layer1 = layer_type(dim_in, dim_out)

    def forward(self, t, x):
        out0 = self._layer0(x)
        out1 = self._layer1(x)
        return out0 + (out1 - out0) * t
class BlendConv2d(nn.Module):
    """Linearly interpolates two convolutions over time:
    (1 - t)*f0(x) + t*f1(x)."""

    def __init__(
        self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False,
        **unused_kwargs
    ):
        super(BlendConv2d, self).__init__()
        conv_cls = nn.ConvTranspose2d if transpose else nn.Conv2d
        self._layer0 = conv_cls(
            dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )
        self._layer1 = conv_cls(
            dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
            bias=bias
        )

    def forward(self, t, x):
        out0 = self._layer0(x)
        out1 = self._layer1(x)
        return out0 + (out1 - out0) * t
| 11,057 | 36.869863 | 120 | py |
steer | steer-master/latent_ode/mujoco_physics.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Authors: Yulia Rubanova and Ricky Chen
###########################
import os
import numpy as np
import torch
from lib.utils import get_dict_template
import lib.utils as utils
from torchvision.datasets.utils import download_url
class HopperPhysics(object):
    """Hopper trajectories simulated with the DeepMind Control Suite.

    Loads (or downloads/generates) `training.pt`, a tensor of
    n_training_samples trajectories of T steps with D state dimensions
    (qpos followed by qvel), and normalizes it via utils.normalize_data.
    """

    # T time steps per trajectory; D = state dim (qpos + qvel).
    T = 200
    D = 14

    n_training_samples = 10000

    training_file = 'training.pt'

    def __init__(self, root, download = True, generate=False, device = torch.device("cpu")):
        self.root = root
        if download:
            self._download()

        if generate:
            self._generate_dataset()

        if not self._check_exists():
            raise RuntimeError('Dataset not found.' + ' You can use download=True to download it')

        data_file = os.path.join(self.data_folder, self.training_file)

        self.data = torch.Tensor(torch.load(data_file)).to(device)
        # Keep the normalization statistics so trajectories can be denormalized.
        self.data, self.data_min, self.data_max = utils.normalize_data(self.data)

        self.device =device

    def visualize(self, traj, plot_name = 'traj', dirname='hopper_imgs', video_name = None):
        r"""Generates images of the trajectory and stores them as <dirname>/traj<index>-<t>.jpg"""

        T, D = traj.size()
        # Undo normalization before rendering (inverse of utils.normalize_data).
        traj = traj.cpu() * self.data_max.cpu() + self.data_min.cpu()

        try:
            from dm_control import suite  # noqa: F401
        except ImportError as e:
            raise Exception('Deepmind Control Suite is required to visualize the dataset.') from e

        try:
            from PIL import Image  # noqa: F401
        except ImportError as e:
            raise Exception('PIL is required to visualize the dataset.') from e

        def save_image(data, filename):
            im = Image.fromarray(data)
            im.save(filename)

        os.makedirs(dirname, exist_ok=True)

        env = suite.load('hopper', 'stand')
        physics = env.physics

        for t in range(T):
            with physics.reset_context():
                # First half of the state is qpos, second half qvel.
                physics.data.qpos[:] = traj[t, :D // 2]
                physics.data.qvel[:] = traj[t, D // 2:]
            save_image(
                physics.render(height=480, width=640, camera_id=0),
                os.path.join(dirname, plot_name + '-{:03d}.jpg'.format(t))
            )

    def _generate_dataset(self):
        if self._check_exists():
            return
        os.makedirs(self.data_folder, exist_ok=True)
        print('Generating dataset...')
        train_data = self._generate_random_trajectories(self.n_training_samples)
        torch.save(train_data, os.path.join(self.data_folder, self.training_file))

    def _download(self):
        if self._check_exists():
            return

        print("Downloading the dataset [325MB] ...")
        os.makedirs(self.data_folder, exist_ok=True)
        url = "http://www.cs.toronto.edu/~rtqichen/datasets/HopperPhysics/training.pt"
        download_url(url, self.data_folder, "training.pt", None)

    def _generate_random_trajectories(self, n_samples):

        try:
            from dm_control import suite  # noqa: F401
        except ImportError as e:
            raise Exception('Deepmind Control Suite is required to generate the dataset.') from e

        env = suite.load('hopper', 'stand')
        physics = env.physics

        # Store the state of the RNG to restore later.
        st0 = np.random.get_state()
        np.random.seed(123)

        data = np.zeros((n_samples, self.T, self.D))
        for i in range(n_samples):
            with physics.reset_context():
                # x and z positions of the hopper. We want z > 0 for the hopper to stay above ground.
                physics.data.qpos[:2] = np.random.uniform(0, 0.5, size=2)
                physics.data.qpos[2:] = np.random.uniform(-2, 2, size=physics.data.qpos[2:].shape)
                physics.data.qvel[:] = np.random.uniform(-5, 5, size=physics.data.qvel.shape)
            for t in range(self.T):
                data[i, t, :self.D // 2] = physics.data.qpos
                data[i, t, self.D // 2:] = physics.data.qvel
                physics.step()

        # Restore RNG.
        np.random.set_state(st0)
        return data

    def _check_exists(self):
        return os.path.exists(os.path.join(self.data_folder, self.training_file))

    @property
    def data_folder(self):
        return os.path.join(self.root, self.__class__.__name__)

    # def __getitem__(self, index):
    # 	return self.data[index]

    def get_dataset(self):
        return self.data

    def __len__(self):
        return len(self.data)

    def size(self, ind = None):
        # With `ind` returns one dimension of the data shape, else the full shape.
        if ind is not None:
            return self.data.shape[ind]
        return self.data.shape

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        return fmt_str
| 4,315 | 27.966443 | 92 | py |
steer | steer-master/latent_ode/person_activity.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Authors: Yulia Rubanova and Ricky Chen
###########################
import os
import lib.utils as utils
import numpy as np
import tarfile
import torch
from torch.utils.data import DataLoader
from torchvision.datasets.utils import download_url
from lib.utils import get_device
# Adapted from: https://github.com/rtqichen/time-series-datasets
class PersonActivity(object):
    """UCI 'Localization Data for Person Activity' dataset.

    Downloads the raw sensor log, groups the readings of four body-worn tags
    into irregularly-sampled multivariate time series (one per recording),
    and splits each long series into overlapping windows of at most
    `max_seq_length` steps. Each stored record is a tuple
    (record_id, tt, vals, mask, labels).
    """

    urls = [
        'https://archive.ics.uci.edu/ml/machine-learning-databases/00196/ConfLongDemo_JSI.txt',
    ]

    tag_ids = [
        "010-000-024-033", #"ANKLE_LEFT",
        "010-000-030-096", #"ANKLE_RIGHT",
        "020-000-033-111", #"CHEST",
        "020-000-032-221" #"BELT"
    ]

    tag_dict = {k: i for i, k in enumerate(tag_ids)}

    label_names = [
        "walking",
        "falling",
        "lying down",
        "lying",
        "sitting down",
        "sitting",
        "standing up from lying",
        "on all fours",
        "sitting on the ground",
        "standing up from sitting",
        "standing up from sit on grnd"
    ]

    #label_dict = {k: i for i, k in enumerate(label_names)}

    #Merge similar labels into one class
    label_dict = {
        "walking": 0,
        "falling": 1,
        "lying": 2,
        "lying down": 2,
        "sitting": 3,
        "sitting down" : 3,
        "standing up from lying": 4,
        "standing up from sitting": 4,
        "standing up from sit on grnd": 4,
        "on all fours": 5,
        "sitting on the ground": 6
    }

    def __init__(self, root, download=False,
        reduce='average', max_seq_length = 50,
        n_samples = None, device = torch.device("cpu")):

        self.root = root
        self.reduce = reduce
        self.max_seq_length = max_seq_length

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError('Dataset not found. You can use download=True to download it')

        if device == torch.device("cpu"):
            self.data = torch.load(os.path.join(self.processed_folder, self.data_file), map_location='cpu')
        else:
            self.data = torch.load(os.path.join(self.processed_folder, self.data_file))

        if n_samples is not None:
            self.data = self.data[:n_samples]

    def download(self):
        """Download, parse and window the raw UCI log into processed/data.pt."""
        if self._check_exists():
            return

        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        os.makedirs(self.raw_folder, exist_ok=True)
        os.makedirs(self.processed_folder, exist_ok=True)

        def save_record(records, record_id, tt, vals, mask, labels):
            # Stack one recording's per-timestep lists into tensors and
            # append its sliding windows to `records`.
            tt = torch.tensor(tt).to(self.device)

            vals = torch.stack(vals)
            mask = torch.stack(mask)
            labels = torch.stack(labels)

            # flatten the measurements for different tags
            vals = vals.reshape(vals.size(0), -1)
            mask = mask.reshape(mask.size(0), -1)

            assert(len(tt) == vals.size(0))
            assert(mask.size(0) == vals.size(0))
            assert(labels.size(0) == vals.size(0))

            #records.append((record_id, tt, vals, mask, labels))

            seq_length = len(tt)
            # split the long time series into smaller ones
            offset = 0
            slide = self.max_seq_length // 2

            while (offset + self.max_seq_length < seq_length):
                idx = range(offset, offset + self.max_seq_length)

                first_tp = tt[idx][0]
                records.append((record_id, tt[idx] - first_tp, vals[idx], mask[idx], labels[idx]))
                offset += slide

        for url in self.urls:
            filename = url.rpartition('/')[2]
            download_url(url, self.raw_folder, filename, None)

            print('Processing {}...'.format(filename))

            dirname = os.path.join(self.raw_folder)
            records = []
            first_tp = None

            for txtfile in os.listdir(dirname):
                with open(os.path.join(dirname, txtfile)) as f:
                    lines = f.readlines()
                    prev_time = -1
                    tt = []

                    record_id = None
                    for l in lines:
                        cur_record_id, tag_id, time, date, val1, val2, val3, label = l.strip().split(',')
                        value_vec = torch.Tensor((float(val1), float(val2), float(val3))).to(self.device)
                        time = float(time)

                        if cur_record_id != record_id:
                            # New recording: flush the previous one and reset buffers.
                            if record_id is not None:
                                save_record(records, record_id, tt, vals, mask, labels)
                            tt, vals, mask, nobs, labels = [], [], [], [], []
                            record_id = cur_record_id

                            tt = [torch.zeros(1).to(self.device)]
                            vals = [torch.zeros(len(self.tag_ids),3).to(self.device)]
                            mask = [torch.zeros(len(self.tag_ids),3).to(self.device)]
                            nobs = [torch.zeros(len(self.tag_ids)).to(self.device)]
                            labels = [torch.zeros(len(self.label_names)).to(self.device)]

                            first_tp = time
                            time = round((time - first_tp)/ 10**5)
                            prev_time = time
                        else:
                            # for speed -- we actually don't need to quantize it in Latent ODE
                            time = round((time - first_tp)/ 10**5) # quatizing by 100 ms. 10,000 is one millisecond, 10,000,000 is one second
                            if time != prev_time:
                                tt.append(time)
                                vals.append(torch.zeros(len(self.tag_ids),3).to(self.device))
                                mask.append(torch.zeros(len(self.tag_ids),3).to(self.device))
                                nobs.append(torch.zeros(len(self.tag_ids)).to(self.device))
                                labels.append(torch.zeros(len(self.label_names)).to(self.device))
                                prev_time = time

                        if tag_id in self.tag_ids:
                            n_observations = nobs[-1][self.tag_dict[tag_id]]
                            # Multiple readings of the same tag at one quantized
                            # time step are averaged (when reduce == 'average').
                            if (self.reduce == 'average') and (n_observations > 0):
                                prev_val = vals[-1][self.tag_dict[tag_id]]
                                new_val = (prev_val * n_observations + value_vec) / (n_observations + 1)
                                vals[-1][self.tag_dict[tag_id]] = new_val
                            else:
                                vals[-1][self.tag_dict[tag_id]] = value_vec

                            mask[-1][self.tag_dict[tag_id]] = 1
                            nobs[-1][self.tag_dict[tag_id]] += 1

                            if label in self.label_names:
                                if torch.sum(labels[-1][self.label_dict[label]]) == 0:
                                    labels[-1][self.label_dict[label]] = 1
                        else:
                            assert tag_id == 'RecordID', 'Read unexpected tag id {}'.format(tag_id)
                    save_record(records, record_id, tt, vals, mask, labels)

            torch.save(
                records,
                os.path.join(self.processed_folder, 'data.pt')
            )

        print('Done!')

    def _check_exists(self):
        for url in self.urls:
            filename = url.rpartition('/')[2]
            if not os.path.exists(
                os.path.join(self.processed_folder, 'data.pt')
            ):
                return False
        return True

    @property
    def raw_folder(self):
        return os.path.join(self.root, self.__class__.__name__, 'raw')

    @property
    def processed_folder(self):
        return os.path.join(self.root, self.__class__.__name__, 'processed')

    @property
    def data_file(self):
        return 'data.pt'

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        fmt_str += '    Max length: {}\n'.format(self.max_seq_length)
        fmt_str += '    Reduce: {}\n'.format(self.reduce)
        return fmt_str
def get_person_id(record_id):
    """Map a record id to a 0-based person index.

    The first letter of the record id identifies the person ('A' -> 0,
    'B' -> 1, ...).
    """
    return ord(record_id[0]) - ord("A")
def variable_time_collate_fn_activity(batch, args, device = torch.device("cpu"), data_type = "train"):
    """
    Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
    - record_id is a patient id
    - tt is a 1-dimensional tensor containing T time values of observations.
    - vals is a (T, D) tensor containing observed values for D variables.
    - mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
    - labels is a (T, N) tensor of per-timestep activity labels.

    Returns:
        combined_tt: The union of all time observations.
        combined_vals: (M, T, D) tensor containing the observed values.
        combined_mask: (M, T, D) tensor containing 1 where values were observed and 0 otherwise.
    """
    D = batch[0][2].shape[1]
    N = batch[0][-1].shape[1]  # number of labels

    # Union of all time points across the batch; inverse_indices maps each
    # example's own time stamps into positions of the combined grid.
    combined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)
    combined_tt = combined_tt.to(device)

    offset = 0
    combined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)
    combined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)

    combined_labels = torch.zeros([len(batch), len(combined_tt), N]).to(device)

    for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
        tt = tt.to(device)
        vals = vals.to(device)
        mask = mask.to(device)
        labels = labels.to(device)

        # Slice this example's positions out of the flattened inverse index.
        indices = inverse_indices[offset:offset + len(tt)]
        offset += len(tt)

        combined_vals[b, indices] = vals
        combined_mask[b, indices] = mask
        combined_labels[b, indices] = labels

    # Rescale time stamps to [0, 1].
    combined_tt = combined_tt.float()
    if torch.max(combined_tt) != 0.:
        combined_tt = combined_tt / torch.max(combined_tt)

    data_dict = {
        "data": combined_vals,
        "time_steps": combined_tt,
        "mask": combined_mask,
        "labels": combined_labels}

    data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)
    return data_dict
if __name__ == '__main__':
    # Smoke test: build the dataset and pull one batch through the collate fn.
    torch.manual_seed(1991)
    dataset = PersonActivity('data/PersonActivity', download=True)
    dataloader = DataLoader(dataset, batch_size=30, shuffle=True, collate_fn= variable_time_collate_fn_activity)
    # Bug fix: Python 3 iterators have no .next() method -- the original
    # `dataloader.__iter__().next()` raised AttributeError; use next(iter(...)).
    # NOTE(review): variable_time_collate_fn_activity also requires an `args`
    # argument, which DataLoader does not supply -- confirm this smoke test
    # against the collate fn's current signature.
    next(iter(dataloader))
| 9,173 | 29.682274 | 120 | py |
steer | steer-master/latent_ode/generate_timeseries.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
# Create a synthetic dataset
from __future__ import print_function
from __future__ import absolute_import, division
import lib.utils as utils
import torch
import matplotlib.image
import matplotlib.pyplot as plt
import pickle
from scipy.special import expit as sigmoid
import numpy.random as npr
import numpy as np
import os
import matplotlib
# Use an interactive TkAgg backend on the author's machine, headless Agg otherwise.
if os.path.exists("/Users/yulia"):
    matplotlib.use('TkAgg')
else:
    matplotlib.use('Agg')
# ======================================================================================
def get_next_val(init, t, tmin, tmax, final=None):
    """Linear schedule: value at time t moving from `init` toward `final`.

    Returns `init` unchanged when `final` is None. Note the interpolation
    weight is t / (tmax - tmin), i.e. t is measured from 0, not from tmin.
    """
    if final is None:
        return init
    return init + (final - init) / (tmax - tmin) * t
def generate_periodic(time_steps, init_freq, init_amplitude, starting_point,
                      final_freq=None, final_amplitude=None, phi_offset=0.):
    """Sample a sinusoid with (optionally) time-varying amplitude/frequency.

    Returns an array of shape (len(time_steps), 2) whose columns are the
    time stamps and the sampled values.
    """
    tmin = time_steps.min()
    tmax = time_steps.max()
    span = tmax - tmin

    def _interp(init, final, t):
        # Linear schedule from init toward final (None keeps init fixed);
        # t is weighted by t / span, measured from 0.
        if final is None:
            return init
        return init + (final - init) / span * t

    samples = []
    phi = phi_offset
    t_prev = time_steps[0]
    for t in time_steps:
        dt = t - t_prev
        amp = _interp(init_amplitude, final_amplitude, t)
        freq = _interp(init_freq, final_freq, t)
        phi = phi + 2 * np.pi * freq * dt  # integrate frequency to get phase
        y = amp * np.sin(phi) + starting_point
        t_prev = t
        samples.append([t, y])
    return np.array(samples)
def assign_value_or_sample(value, sampling_interval=[0., 1.]):
    """Return `value`, or a uniform sample from `sampling_interval` if None."""
    if value is not None:
        return value
    lo, hi = sampling_interval
    return np.random.random() * (hi - lo) + lo
class TimeSeries:
    """Base class for synthetic time-series generators."""

    def __init__(self, device=torch.device("cpu")):
        self.device = device
        self.z0 = None

    def init_visualization(self):
        self.fig = plt.figure(figsize=(10, 4), facecolor='white')
        self.ax = self.fig.add_subplot(111, frameon=False)
        plt.show(block=False)

    def visualize(self, truth):
        self.ax.plot(truth[:, 0], truth[:, 1])

    def add_noise(self, traj_list, time_steps, noise_weight):
        """Return a copy of traj_list with uniform noise (scaled by
        noise_weight) added to channel 0 of every time point but the first."""
        n_samples = traj_list.size(0)
        n_tp = len(time_steps) - 1
        noise = torch.Tensor(np.random.sample((n_samples, n_tp))).to(self.device)

        noisy = traj_list.clone()
        # The first time point is kept noise-free.
        noisy[:, 1:, 0] += noise_weight * noise
        return noisy
class Periodic_1d(TimeSeries):
    """Generator of noisy 1-D periodic trajectories (see generate_periodic)."""

    def __init__(self, device=torch.device("cpu"),
                 init_freq=0.3, init_amplitude=1.,
                 final_amplitude=10., final_freq=1.,
                 z0=0.):
        """
        If some of the parameters (init_freq, init_amplitude, final_amplitude, final_freq) is not provided, it is randomly sampled.
        For now, all the time series share the time points and the starting point.
        """
        super(Periodic_1d, self).__init__(device)

        self.init_freq = init_freq
        self.init_amplitude = init_amplitude
        self.final_amplitude = final_amplitude
        self.final_freq = final_freq
        self.z0 = z0

    def sample_traj(self, time_steps, n_samples=1, noise_weight=1.,
                    cut_out_section=None):
        """
        Sample periodic functions.
        """
        traj_list = []
        for i in range(n_samples):
            # None-valued parameters are resampled per trajectory.
            init_freq = assign_value_or_sample(self.init_freq, [0.4, 0.8])
            if self.final_freq is None:
                final_freq = init_freq
            else:
                final_freq = assign_value_or_sample(
                    self.final_freq, [0.4, 0.8])
            init_amplitude = assign_value_or_sample(
                self.init_amplitude, [0., 1.])
            final_amplitude = assign_value_or_sample(
                self.final_amplitude, [0., 1.])

            # Jitter the shared starting point per trajectory.
            noisy_z0 = self.z0 + np.random.normal(loc=0., scale=0.1)

            traj = generate_periodic(time_steps, init_freq=init_freq,
                                     init_amplitude=init_amplitude, starting_point=noisy_z0,
                                     final_amplitude=final_amplitude, final_freq=final_freq)

            # Cut the time dimension
            traj = np.expand_dims(traj[:, 1:], 0)
            traj_list.append(traj)

        # shape: [n_samples, n_timesteps, 2]
        # traj_list[:,:,0] -- time stamps
        # traj_list[:,:,1] -- values at the time stamps
        traj_list = np.array(traj_list)
        traj_list = torch.Tensor().new_tensor(traj_list, device=self.device)
        traj_list = traj_list.squeeze(1)

        traj_list = self.add_noise(traj_list, time_steps, noise_weight)
        return traj_list
| 5,248 | 33.761589 | 131 | py |
steer | steer-master/latent_ode/physionet.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Authors: Yulia Rubanova and Ricky Chen
###########################
import os
import matplotlib
if os.path.exists("/Users/yulia"):
matplotlib.use('TkAgg')
else:
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.pyplot as plt
import lib.utils as utils
import numpy as np
import tarfile
import torch
from torch.utils.data import DataLoader
from torchvision.datasets.utils import download_url
from lib.utils import get_device
# Adapted from: https://github.com/rtqichen/time-series-datasets
# get minimum and maximum for each feature across the whole dataset
def get_data_min_max(records):
    """Compute per-feature minima and maxima over all observed values.

    Each record is a tuple (record_id, tt, vals, mask, labels) where
    vals is (T, D) and mask is (T, D) with 1 marking observed entries.
    A feature with no observations in a record contributes +inf / -inf
    placeholders so it never wins the running min/max.

    Returns:
        (data_min, data_max): two 1-d tensors of length D.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    inf = torch.Tensor([float("Inf")])[0].to(device)

    running_min, running_max = None, None
    for record_id, tt, vals, mask, labels in records:
        per_feature_min, per_feature_max = [], []
        for d in range(vals.size(-1)):
            observed = vals[:, d][mask[:, d] == 1]
            if len(observed) == 0:
                # No data for this feature in this record.
                per_feature_min.append(inf)
                per_feature_max.append(-inf)
            else:
                per_feature_min.append(torch.min(observed))
                per_feature_max.append(torch.max(observed))

        record_min = torch.stack(per_feature_min)
        record_max = torch.stack(per_feature_max)

        if running_min is None:
            running_min, running_max = record_min, record_max
        else:
            running_min = torch.min(running_min, record_min)
            running_max = torch.max(running_max, record_max)

    return running_min, running_max
class PhysioNet(object):
    """PhysioNet Challenge 2012 dataset of irregularly-sampled ICU time series.

    Each element is a tuple ``(record_id, tt, vals, mask, labels)``:
      - ``record_id``: patient id (string)
      - ``tt``: 1-d tensor of (quantized) observation times in hours
      - ``vals``: (T, D) tensor of observed values (0 where unobserved)
      - ``mask``: (T, D) tensor with 1 where a value was observed
      - ``labels``: in-hospital-mortality label, or None when the record
        has no outcome (set-b)
    """

    urls = [
        'https://physionet.org/files/challenge-2012/1.0.0/set-a.tar.gz?download',
        'https://physionet.org/files/challenge-2012/1.0.0/set-b.tar.gz?download',
    ]

    outcome_urls = ['https://physionet.org/files/challenge-2012/1.0.0/Outcomes-a.txt']

    params = [
        'Age', 'Gender', 'Height', 'ICUType', 'Weight', 'Albumin', 'ALP', 'ALT', 'AST', 'Bilirubin', 'BUN',
        'Cholesterol', 'Creatinine', 'DiasABP', 'FiO2', 'GCS', 'Glucose', 'HCO3', 'HCT', 'HR', 'K', 'Lactate', 'Mg',
        'MAP', 'MechVent', 'Na', 'NIDiasABP', 'NIMAP', 'NISysABP', 'PaCO2', 'PaO2', 'pH', 'Platelets', 'RespRate',
        'SaO2', 'SysABP', 'Temp', 'TroponinI', 'TroponinT', 'Urine', 'WBC'
    ]

    params_dict = {k: i for i, k in enumerate(params)}

    labels = [ "SAPS-I", "SOFA", "Length_of_stay", "Survival", "In-hospital_death" ]
    labels_dict = {k: i for i, k in enumerate(labels)}

    def __init__(self, root, train=True, download=False,
        quantization = 0.1, n_samples = None, device = torch.device("cpu")):
        """Load (optionally downloading and preprocessing) one split.

        Args:
            root: directory under which raw/processed data is stored.
            train: if True use set-a (labeled), else set-b.
            download: download and preprocess the data if not present.
            quantization: bucket size (hours) for rounding time stamps.
            n_samples: optionally truncate the dataset to this many records.
            device: device to map loaded tensors onto.
        """
        self.root = root
        self.train = train
        self.reduce = "average"
        self.quantization = quantization

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError('Dataset not found. You can use download=True to download it')

        if self.train:
            data_file = self.training_file
        else:
            data_file = self.test_file

        if device == torch.device("cpu"):
            self.data = torch.load(os.path.join(self.processed_folder, data_file), map_location='cpu')
            self.labels = torch.load(os.path.join(self.processed_folder, self.label_file), map_location='cpu')
        else:
            self.data = torch.load(os.path.join(self.processed_folder, data_file))
            self.labels = torch.load(os.path.join(self.processed_folder, self.label_file))

        if n_samples is not None:
            self.data = self.data[:n_samples]
            self.labels = self.labels[:n_samples]

    def download(self):
        """Download the raw archives and preprocess them into .pt files."""
        if self._check_exists():
            return

        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        os.makedirs(self.raw_folder, exist_ok=True)
        os.makedirs(self.processed_folder, exist_ok=True)

        # Download outcome (label) data
        for url in self.outcome_urls:
            filename = url.rpartition('/')[2]
            download_url(url, self.raw_folder, filename, None)

            txtfile = os.path.join(self.raw_folder, filename)
            with open(txtfile) as f:
                lines = f.readlines()
                outcomes = {}
                for l in lines[1:]:
                    l = l.rstrip().split(',')
                    record_id, labels = l[0], np.array(l[1:]).astype(float)
                    outcomes[record_id] = torch.Tensor(labels).to(self.device)

                # BUG FIX: save the full {record_id: labels} mapping rather
                # than the loop variable `labels` (which held only the labels
                # of the LAST parsed record). __init__/get_label index the
                # loaded object by record id, so it must be the dict.
                torch.save(
                    outcomes,
                    os.path.join(self.processed_folder, filename.split('.')[0] + '.pt')
                )

        for url in self.urls:
            filename = url.rpartition('/')[2]
            download_url(url, self.raw_folder, filename, None)
            tar = tarfile.open(os.path.join(self.raw_folder, filename), "r:gz")
            tar.extractall(self.raw_folder)
            tar.close()

            print('Processing {}...'.format(filename))

            dirname = os.path.join(self.raw_folder, filename.split('.')[0])
            patients = []
            total = 0
            for txtfile in os.listdir(dirname):
                record_id = txtfile.split('.')[0]
                with open(os.path.join(dirname, txtfile)) as f:
                    lines = f.readlines()
                    prev_time = 0
                    tt = [0.]
                    vals = [torch.zeros(len(self.params)).to(self.device)]
                    mask = [torch.zeros(len(self.params)).to(self.device)]
                    nobs = [torch.zeros(len(self.params))]
                    for l in lines[1:]:
                        total += 1
                        time, param, val = l.split(',')
                        # Time in hours
                        time = float(time.split(':')[0]) + float(time.split(':')[1]) / 60.
                        # round up the time stamps (up to 6 min by default)
                        # used for speed -- we actually don't need to quantize it in Latent ODE
                        time = round(time / self.quantization) * self.quantization

                        if time != prev_time:
                            tt.append(time)
                            vals.append(torch.zeros(len(self.params)).to(self.device))
                            mask.append(torch.zeros(len(self.params)).to(self.device))
                            nobs.append(torch.zeros(len(self.params)).to(self.device))
                            prev_time = time

                        if param in self.params_dict:
                            n_observations = nobs[-1][self.params_dict[param]]
                            # Several measurements can fall into one quantized
                            # bucket; "average" keeps a running mean of them.
                            if self.reduce == 'average' and n_observations > 0:
                                prev_val = vals[-1][self.params_dict[param]]
                                new_val = (prev_val * n_observations + float(val)) / (n_observations + 1)
                                vals[-1][self.params_dict[param]] = new_val
                            else:
                                vals[-1][self.params_dict[param]] = float(val)
                            mask[-1][self.params_dict[param]] = 1
                            nobs[-1][self.params_dict[param]] += 1
                        else:
                            assert param == 'RecordID', 'Read unexpected param {}'.format(param)

                tt = torch.tensor(tt).to(self.device)
                vals = torch.stack(vals)
                mask = torch.stack(mask)

                labels = None
                if record_id in outcomes:
                    # Only training set has labels
                    labels = outcomes[record_id]
                    # Out of 5 label types provided for Physionet, take only the last one -- mortality
                    labels = labels[4]

                patients.append((record_id, tt, vals, mask, labels))

            torch.save(
                patients,
                os.path.join(self.processed_folder,
                    filename.split('.')[0] + "_" + str(self.quantization) + '.pt')
            )

        print('Done!')

    def _check_exists(self):
        """Return True when every processed split file is present."""
        for url in self.urls:
            filename = url.rpartition('/')[2]

            if not os.path.exists(
                os.path.join(self.processed_folder,
                    filename.split('.')[0] + "_" + str(self.quantization) + '.pt')
            ):
                return False
        return True

    @property
    def raw_folder(self):
        return os.path.join(self.root, self.__class__.__name__, 'raw')

    @property
    def processed_folder(self):
        return os.path.join(self.root, self.__class__.__name__, 'processed')

    @property
    def training_file(self):
        return 'set-a_{}.pt'.format(self.quantization)

    @property
    def test_file(self):
        return 'set-b_{}.pt'.format(self.quantization)

    @property
    def label_file(self):
        return 'Outcomes-a.pt'

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)

    def get_label(self, record_id):
        """Return the stored labels for one patient id."""
        return self.labels[record_id]

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Split: {}\n'.format('train' if self.train is True else 'test')
        fmt_str += '    Root Location: {}\n'.format(self.root)
        fmt_str += '    Quantization: {}\n'.format(self.quantization)
        fmt_str += '    Reduce: {}\n'.format(self.reduce)
        return fmt_str

    def visualize(self, timesteps, data, mask, plot_name):
        """Plot every feature that has more than two observations."""
        width = 15
        height = 15

        non_zero_attributes = (torch.sum(mask, 0) > 2).numpy()
        non_zero_idx = [i for i in range(len(non_zero_attributes)) if non_zero_attributes[i] == 1.]
        n_non_zero = sum(non_zero_attributes)

        mask = mask[:, non_zero_idx]
        data = data[:, non_zero_idx]

        params_non_zero = [self.params[i] for i in non_zero_idx]
        params_dict = {k: i for i, k in enumerate(params_non_zero)}

        n_col = 3
        n_row = n_non_zero // n_col + (n_non_zero % n_col > 0)
        fig, ax_list = plt.subplots(n_row, n_col, figsize=(width, height), facecolor='white')

        for i in range(n_non_zero):
            param = params_non_zero[i]
            param_id = params_dict[param]

            tp_mask = mask[:, param_id].long()

            tp_cur_param = timesteps[tp_mask == 1.]
            data_cur_param = data[tp_mask == 1., param_id]

            ax_list[i // n_col, i % n_col].plot(tp_cur_param.numpy(), data_cur_param.numpy(), marker='o')
            ax_list[i // n_col, i % n_col].set_title(param)

        fig.tight_layout()
        fig.savefig(plot_name)
        plt.close(fig)
def variable_time_collate_fn(batch, args, device = torch.device("cpu"), data_type = "train",
    data_min = None, data_max = None):
    """
    Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
    - record_id is a patient id
    - tt is a 1-dimensional tensor containing T time values of observations.
    - vals is a (T, D) tensor containing observed values for D variables.
    - mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
    - labels is a list of labels for the current patient, if labels are available. Otherwise None.

    data_min / data_max are per-feature bounds used for normalization
    (see get_data_min_max).

    Returns:
        combined_tt: The union of all time observations.
        combined_vals: (M, T, D) tensor containing the observed values.
        combined_mask: (M, T, D) tensor containing 1 where values were observed and 0 otherwise.
    """
    D = batch[0][2].shape[1]
    # Union of all time points across the batch; inverse_indices maps each
    # original time stamp to its position in the sorted union.
    combined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)
    combined_tt = combined_tt.to(device)

    offset = 0
    combined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)
    combined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)

    combined_labels = None
    N_labels = 1

    # NaN marks "no label available" for a record.
    combined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))
    combined_labels = combined_labels.to(device = device)

    for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
        tt = tt.to(device)
        vals = vals.to(device)
        mask = mask.to(device)
        if labels is not None:
            labels = labels.to(device)

        # inverse_indices was built from the concatenation of all tt's, so
        # each record's slice starts at the running offset.
        indices = inverse_indices[offset:offset + len(tt)]
        offset += len(tt)

        # Scatter this record's observations onto the union time grid.
        combined_vals[b, indices] = vals
        combined_mask[b, indices] = mask

        if labels is not None:
            combined_labels[b] = labels

    # Min-max normalize observed values only (masked entries stay 0).
    combined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask,
        att_min = data_min, att_max = data_max)

    # Rescale the union time grid to [0, 1].
    if torch.max(combined_tt) != 0.:
        combined_tt = combined_tt / torch.max(combined_tt)

    data_dict = {
        "data": combined_vals,
        "time_steps": combined_tt,
        "mask": combined_mask,
        "labels": combined_labels}

    # Splits into observed/predicted parts and subsamples per args
    # (interpolation vs extrapolation mode) -- see lib.utils.
    data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)
    return data_dict
if __name__ == '__main__':
    # Smoke test: download set-b and collate one batch.
    torch.manual_seed(1991)

    dataset = PhysioNet('data/physionet', train=False, download=True)
    # NOTE(review): variable_time_collate_fn requires an `args` argument that
    # DataLoader's collate protocol does not supply -- confirm intended usage.
    dataloader = DataLoader(dataset, batch_size=10, shuffle=True, collate_fn=variable_time_collate_fn)
    # BUG FIX: iterators have no .next() method in Python 3; use next().
    print(next(iter(dataloader)))
| 11,603 | 31.233333 | 114 | py |
steer | steer-master/latent_ode/run_models.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.pyplot as plt
import time
import datetime
import argparse
import numpy as np
import pandas as pd
from random import SystemRandom
from sklearn import model_selection
import torch
import torch.nn as nn
from torch.nn.functional import relu
import torch.optim as optim
import lib.utils as utils
from lib.plotting import *
from lib.rnn_baselines import *
from lib.ode_rnn import *
from lib.create_latent_ode_model import create_LatentODE_model
from lib.parse_datasets import parse_datasets
from lib.ode_func import ODEFunc, ODEFunc_w_Poisson
from lib.diffeq_solver import DiffeqSolver
from mujoco_physics import HopperPhysics
from lib.utils import compute_loss_all_batches
# Generative model for noisy data based on ODE
# Command-line configuration for training/evaluating Latent ODE and baselines.
parser = argparse.ArgumentParser('Latent ODE')
parser.add_argument('-n', type=int, default=100, help="Size of the dataset")
parser.add_argument('--niters', type=int, default=300)
parser.add_argument('--lr', type=float, default=1e-2, help="Starting learning rate.")
parser.add_argument('-b', '--batch-size', type=int, default=50)
parser.add_argument('--viz', action='store_true', help="Show plots while training")

parser.add_argument('--save', type=str, default='experiments/', help="Path for save checkpoints")
parser.add_argument('--load', type=str, default=None, help="ID of the experiment to load for evaluation. If None, run a new experiment.")
parser.add_argument('-r', '--random-seed', type=int, default=1991, help="Random_seed")

parser.add_argument('--dataset', type=str, default='periodic', help="Dataset to load. Available: physionet, activity, hopper, periodic")
parser.add_argument('-s', '--sample-tp', type=float, default=None, help="Number of time points to sub-sample."
    "If > 1, subsample exact number of points. If the number is in [0,1], take a percentage of available points per time series. If None, do not subsample")

parser.add_argument('-c', '--cut-tp', type=int, default=None, help="Cut out the section of the timeline of the specified length (in number of points)."
    "Used for periodic function demo.")

parser.add_argument('--quantization', type=float, default=0.1, help="Quantization on the physionet dataset."
    "Value 1 means quantization by 1 hour, value 0.1 means quantization by 0.1 hour = 6 min")

parser.add_argument('--latent-ode', action='store_true', help="Run Latent ODE seq2seq model")
parser.add_argument('--z0-encoder', type=str, default='odernn', help="Type of encoder for Latent ODE model: odernn or rnn")

parser.add_argument('--classic-rnn', action='store_true', help="Run RNN baseline: classic RNN that sees true points at every point. Used for interpolation only.")
parser.add_argument('--rnn-cell', default="gru", help="RNN Cell type. Available: gru (default), expdecay")
parser.add_argument('--input-decay', action='store_true', help="For RNN: use the input that is the weighted average of impirical mean and previous value (like in GRU-D)")

parser.add_argument('--ode-rnn', action='store_true', help="Run ODE-RNN baseline: RNN-style that sees true points at every point. Used for interpolation only.")

parser.add_argument('--rnn-vae', action='store_true', help="Run RNN baseline: seq2seq model with sampling of the h0 and ELBO loss.")

parser.add_argument('-l', '--latents', type=int, default=6, help="Size of the latent state")
parser.add_argument('--rec-dims', type=int, default=20, help="Dimensionality of the recognition model (ODE or RNN).")
# BUG FIX: help text was a copy-paste of --rec-dims' description.
parser.add_argument('--experimentID', type=int, default=7, help="Fixed ID used to name checkpoints and logs for a new experiment.")

parser.add_argument('--rec-layers', type=int, default=1, help="Number of layers in ODE func in recognition ODE")
parser.add_argument('--gen-layers', type=int, default=1, help="Number of layers in ODE func in generative ODE")

parser.add_argument('-u', '--units', type=int, default=100, help="Number of units per layer in ODE func")
parser.add_argument('-g', '--gru-units', type=int, default=100, help="Number of units per layer in each of GRU update networks")

parser.add_argument('--poisson', action='store_true', help="Model poisson-process likelihood for the density of events in addition to reconstruction.")
parser.add_argument('--classif', action='store_true', help="Include binary classification loss -- used for Physionet dataset for hospiral mortality")

parser.add_argument('--linear-classif', action='store_true', help="If using a classifier, use a linear classifier instead of 1-layer NN")
parser.add_argument('--extrap', action='store_true', help="Set extrapolation mode. If this flag is not set, run interpolation mode.")

parser.add_argument('-t', '--timepoints', type=int, default=100, help="Total number of time-points")
parser.add_argument('--max-t', type=float, default=5., help="We subsample points in the interval [0, args.max_tp]")
parser.add_argument('--noise-weight', type=float, default=0.01, help="Noise amplitude for generated traejctories")

args = parser.parse_args()

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

file_name = os.path.basename(__file__)[:-3]
utils.makedirs(args.save)
#####################################################################################################
if __name__ == '__main__':
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)

    experimentID = args.load
    if experimentID is None:
        # Make a new experiment ID
        experimentID = args.experimentID #int(SystemRandom().random()*100000)
    ckpt_path = os.path.join(args.save, "experiment_" + str(experimentID) + '.ckpt')

    start = time.time()
    print("Sampling dataset of {} training examples".format(args.n))

    # Reconstruct the launch command without the --load flag (and its value)
    # so it can be logged for reproducibility.
    input_command = sys.argv
    ind = [i for i in range(len(input_command)) if input_command[i] == "--load"]
    if len(ind) == 1:
        ind = ind[0]
        input_command = input_command[:ind] + input_command[(ind+2):]
    input_command = " ".join(input_command)

    utils.makedirs("results/")

    ##################################################################
    data_obj = parse_datasets(args, device)
    input_dim = data_obj["input_dim"]

    classif_per_tp = False
    if ("classif_per_tp" in data_obj):
        # do classification per time point rather than on a time series as a whole
        classif_per_tp = data_obj["classif_per_tp"]

    if args.classif and (args.dataset == "hopper" or args.dataset == "periodic"):
        raise Exception("Classification task is not available for MuJoCo and 1d datasets")

    n_labels = 1
    if args.classif:
        if ("n_labels" in data_obj):
            n_labels = data_obj["n_labels"]
        else:
            raise Exception("Please provide number of labels for classification task")

    ##################################################################
    # Create the model
    obsrv_std = 0.01
    if args.dataset == "hopper":
        obsrv_std = 1e-3

    obsrv_std = torch.Tensor([obsrv_std]).to(device)

    # Standard-normal prior over the initial latent state z0.
    z0_prior = Normal(torch.Tensor([0.0]).to(device), torch.Tensor([1.]).to(device))

    # Exactly one of the model flags selects the architecture to train.
    if args.rnn_vae:
        if args.poisson:
            print("Poisson process likelihood not implemented for RNN-VAE: ignoring --poisson")

        # Create RNN-VAE model
        model = RNN_VAE(input_dim, args.latents,
            device = device,
            rec_dims = args.rec_dims,
            concat_mask = True,
            obsrv_std = obsrv_std,
            z0_prior = z0_prior,
            use_binary_classif = args.classif,
            classif_per_tp = classif_per_tp,
            linear_classifier = args.linear_classif,
            n_units = args.units,
            input_space_decay = args.input_decay,
            cell = args.rnn_cell,
            n_labels = n_labels,
            train_classif_w_reconstr = (args.dataset == "physionet")
            ).to(device)

    elif args.classic_rnn:
        if args.poisson:
            print("Poisson process likelihood not implemented for RNN: ignoring --poisson")

        if args.extrap:
            raise Exception("Extrapolation for standard RNN not implemented")
        # Create RNN model
        model = Classic_RNN(input_dim, args.latents, device,
            concat_mask = True, obsrv_std = obsrv_std,
            n_units = args.units,
            use_binary_classif = args.classif,
            classif_per_tp = classif_per_tp,
            linear_classifier = args.linear_classif,
            input_space_decay = args.input_decay,
            cell = args.rnn_cell,
            n_labels = n_labels,
            train_classif_w_reconstr = (args.dataset == "physionet")
            ).to(device)
    elif args.ode_rnn:
        # Create ODE-GRU model
        n_ode_gru_dims = args.latents
        if args.poisson:
            print("Poisson process likelihood not implemented for ODE-RNN: ignoring --poisson")

        if args.extrap:
            raise Exception("Extrapolation for ODE-RNN not implemented")

        ode_func_net = utils.create_net(n_ode_gru_dims, n_ode_gru_dims,
            n_layers = args.rec_layers, n_units = args.units, nonlinear = nn.Tanh)

        rec_ode_func = ODEFunc(
            input_dim = input_dim,
            latent_dim = n_ode_gru_dims,
            ode_func_net = ode_func_net,
            device = device).to(device)

        z0_diffeq_solver = DiffeqSolver(input_dim, rec_ode_func, "euler", args.latents,
            odeint_rtol = 1e-3, odeint_atol = 1e-4, device = device)

        model = ODE_RNN(input_dim, n_ode_gru_dims, device = device,
            z0_diffeq_solver = z0_diffeq_solver, n_gru_units = args.gru_units,
            concat_mask = True, obsrv_std = obsrv_std,
            use_binary_classif = args.classif,
            classif_per_tp = classif_per_tp,
            n_labels = n_labels,
            train_classif_w_reconstr = (args.dataset == "physionet")
            ).to(device)
    elif args.latent_ode:
        model = create_LatentODE_model(args, input_dim, z0_prior, obsrv_std, device,
            classif_per_tp = classif_per_tp,
            n_labels = n_labels)
    else:
        raise Exception("Model not specified")

    ##################################################################

    if args.viz:
        viz = Visualizations(device)

    ##################################################################

    #Load checkpoint and evaluate the model
    if args.load is not None:
        utils.get_ckpt_model(ckpt_path, model, device)
        exit()

    ##################################################################
    # Training

    log_path = "logs/" + file_name + "_" + str(experimentID) + ".log"
    if not os.path.exists("logs/"):
        utils.makedirs("logs/")
    logger = utils.get_logger(logpath=log_path, filepath=os.path.abspath(__file__))
    logger.info(input_command)

    optimizer = optim.Adamax(model.parameters(), lr=args.lr)

    num_batches = data_obj["n_train_batches"]

    # One iteration == one batch; args.niters counts epochs.
    for itr in range(1, num_batches * (args.niters + 1)):
        optimizer.zero_grad()
        utils.update_learning_rate(optimizer, decay_rate = 0.999, lowest = args.lr / 10)

        # KL annealing: keep the KL term off for the first epochs, then
        # ramp its coefficient towards 1.
        wait_until_kl_inc = 10
        if itr // num_batches < wait_until_kl_inc:
            kl_coef = 0.
        else:
            kl_coef = (1-0.99** (itr // num_batches - wait_until_kl_inc))

        batch_dict = utils.get_next_batch(data_obj["train_dataloader"])
        train_res = model.compute_all_losses(batch_dict, n_traj_samples = 3, kl_coef = kl_coef)
        train_res["loss"].backward()
        optimizer.step()

        n_iters_to_viz = 1
        # Evaluate / log / checkpoint once per epoch.
        if itr % (n_iters_to_viz * num_batches) == 0:
            with torch.no_grad():

                test_res = compute_loss_all_batches(model,
                    data_obj["test_dataloader"], args,
                    n_batches = data_obj["n_test_batches"],
                    experimentID = experimentID,
                    device = device,
                    n_traj_samples = 3, kl_coef = kl_coef)

                message = 'Epoch {:04d} [Test seq (cond on sampled tp)] | Loss {:.6f} | Likelihood {:.6f} | KL fp {:.4f} | FP STD {:.4f}|'.format(
                    itr//num_batches,
                    test_res["loss"].detach(), test_res["likelihood"].detach(),
                    test_res["kl_first_p"], test_res["std_first_p"])

                logger.info("Experiment " + str(experimentID))
                logger.info(message)
                logger.info("KL coef: {}".format(kl_coef))
                logger.info("Train loss (one batch): {}".format(train_res["loss"].detach()))
                logger.info("Train CE loss (one batch): {}".format(train_res["ce_loss"].detach()))

                if "auc" in test_res:
                    logger.info("Classification AUC (TEST): {:.4f}".format(test_res["auc"]))

                if "mse" in test_res:
                    logger.info("Test MSE: {:.4f}".format(test_res["mse"]))

                if "accuracy" in train_res:
                    logger.info("Classification accuracy (TRAIN): {:.4f}".format(train_res["accuracy"]))

                if "accuracy" in test_res:
                    logger.info("Classification accuracy (TEST): {:.4f}".format(test_res["accuracy"]))

                if "pois_likelihood" in test_res:
                    logger.info("Poisson likelihood: {}".format(test_res["pois_likelihood"]))

                if "ce_loss" in test_res:
                    logger.info("CE loss: {}".format(test_res["ce_loss"]))

            torch.save({
                'args': args,
                'state_dict': model.state_dict(),
            }, ckpt_path)

            # Plotting
            if args.viz:
                with torch.no_grad():
                    test_dict = utils.get_next_batch(data_obj["test_dataloader"])

                    print("plotting....")
                    if isinstance(model, LatentODE) and (args.dataset == "periodic"): #and not args.classic_rnn and not args.ode_rnn:
                        plot_id = itr // num_batches // n_iters_to_viz
                        viz.draw_all_plots_one_dim(test_dict, model,
                            plot_name = file_name + "_" + str(experimentID) + "_{:03d}".format(plot_id) + ".png",
                            experimentID = experimentID, save=True)
                        plt.pause(0.01)

    # Final checkpoint after the training loop completes.
    torch.save({
        'args': args,
        'state_dict': model.state_dict(),
    }, ckpt_path)
| 13,141 | 38.584337 | 170 | py |
steer | steer-master/latent_ode/lib/rnn_baselines.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.nn.modules.rnn import GRUCell, LSTMCell, RNNCellBase
from torch.distributions.normal import Normal
from torch.distributions import Independent
from torch.nn.parameter import Parameter
from lib.base_models import Baseline, VAE_Baseline
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Exponential decay of the hidden states for RNN
# adapted from GRU-D implementation: https://github.com/zhiyongc/GRU-D/
# Exp decay between hidden states
class GRUCellExpDecay(RNNCellBase):
    """GRU cell with learned exponential decay of the hidden state,
    adapted from the GRU-D implementation (https://github.com/zhiyongc/GRU-D/).

    The input is expected to be the concatenation of
    [data (+ mask), cumulative time deltas]; the last
    ``input_size_for_decay`` entries are the cumulative deltas used to
    compute the decay factor.
    """

    def __init__(self, input_size, input_size_for_decay, hidden_size, device, bias=True):
        # num_chunks=3 gives weight matrices for the three GRU gates
        # (reset, input, new), same layout as torch.nn.GRUCell.
        super(GRUCellExpDecay, self).__init__(input_size, hidden_size, bias, num_chunks=3)
        self.device = device
        self.input_size_for_decay = input_size_for_decay
        # Single linear layer mapping cumulative deltas to a decay logit.
        self.decay = nn.Sequential(nn.Linear(input_size_for_decay, 1),)
        utils.init_network_weights(self.decay)

    def gru_exp_decay_cell(self, input, hidden, w_ih, w_hh, b_ih, b_hh):
        """One GRU step with the hidden state pre-multiplied by exp-decay."""
        # IMPORTANT: assumes that cum delta t is the last dimension of the input
        batch_size, n_dims = input.size()

        # "input" contains the data, mask and also cumulative deltas for all inputs
        cum_delta_ts = input[:, -self.input_size_for_decay:]
        data = input[:, :-self.input_size_for_decay]

        # decay = exp(-clamp(decay_net(cum_dt), 0, 1000)): in (0, 1], shrinks
        # the hidden state the longer a feature has gone unobserved.
        decay = torch.exp( - torch.min(torch.max(
            torch.zeros([1]).to(self.device), self.decay(cum_delta_ts)),
            torch.ones([1]).to(self.device) * 1000 ))

        hidden = hidden * decay

        # Standard GRU gate math on the decayed hidden state.
        gi = torch.mm(data, w_ih.t()) + b_ih
        gh = torch.mm(hidden, w_hh.t()) + b_hh
        i_r, i_i, i_n = gi.chunk(3, 1)
        h_r, h_i, h_n = gh.chunk(3, 1)

        resetgate = torch.sigmoid(i_r + h_r)
        inputgate = torch.sigmoid(i_i + h_i)
        newgate = torch.tanh(i_n + resetgate * h_n)
        hy = newgate + inputgate * (hidden - newgate)
        return hy

    def forward(self, input, hx=None):
        # type: (Tensor, Optional[Tensor]) -> Tensor
        #self.check_forward_input(input)
        if hx is None:
            # Zero-initialize the hidden state when none is supplied.
            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        #self.check_forward_hidden(input, hx, '')
        return self.gru_exp_decay_cell(
            input, hx,
            self.weight_ih, self.weight_hh,
            self.bias_ih, self.bias_hh
        )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Imputation with a weighed average of previous value and empirical mean
# adapted from GRU-D implementation: https://github.com/zhiyongc/GRU-D/
def get_cum_delta_ts(data, delta_ts, mask):
    """Accumulate time deltas across runs of missing observations.

    For every feature, when the value at time step j (0 < j < T-1) is
    missing, its gap is carried forward into step j+1 -- so each entry
    holds the elapsed time since the feature was last observed. The
    result is normalized by its global maximum.

    Args:
        data: (n_traj, n_tp, n_dims) tensor (only its shape is used).
        delta_ts: (n_traj, n_tp, 1) tensor of per-step time gaps.
        mask: (n_traj, n_tp, n_dims) tensor, 0 marking missing entries.

    Returns:
        (n_traj, n_tp, n_dims) tensor of normalized cumulative deltas.
    """
    _, n_tp, n_dims = data.size()
    cum = delta_ts.repeat(1, 1, n_dims)

    # Walk missing entries in index order so consecutive gaps cascade.
    for i, j, k in zip(*np.where(mask.cpu().numpy() == 0)):
        if j != 0 and j != (n_tp - 1):
            cum[i, j + 1, k] = cum[i, j + 1, k] + cum[i, j, k]

    return cum / cum.max()  # normalize
# adapted from GRU-D implementation: https://github.com/zhiyongc/GRU-D/
# very slow
def impute_using_input_decay(data, delta_ts, mask, w_input_decay, b_input_decay):
    """GRU-D-style imputation: fill missing values with a decay-weighted
    mix of the last observed value and the per-feature empirical mean.

    Args:
        data: (n_traj, n_tp, n_dims) tensor with 0 at missing entries.
        delta_ts: (n_traj, n_tp, 1) tensor of per-step time gaps.
        mask: (n_traj, n_tp, n_dims) tensor, 0 marking missing entries.
        w_input_decay, b_input_decay: learned decay parameters.

    Returns:
        (n_traj, n_tp, n_dims) tensor with missing entries imputed.
    """
    n_traj, n_tp, n_dims = data.size()

    cum_delta_ts = delta_ts.repeat(1, 1, n_dims)
    missing_index = np.where(mask.cpu().numpy() == 0)

    data_last_obsv = np.copy(data.cpu().numpy())
    # Iterate missing entries in index order so that both the cumulative
    # deltas and the last-observation values cascade forward in time.
    for idx in range(missing_index[0].shape[0]):
        i = missing_index[0][idx]
        j = missing_index[1][idx]
        k = missing_index[2][idx]

        if j != 0 and j != (n_tp-1):
            cum_delta_ts[i,j+1,k] = cum_delta_ts[i,j+1,k] + cum_delta_ts[i,j,k]
        if j != 0:
            data_last_obsv[i,j,k] = data_last_obsv[i,j-1,k] # last observation
    cum_delta_ts = cum_delta_ts / cum_delta_ts.max() # normalize

    data_last_obsv = torch.Tensor(data_last_obsv).to(get_device(data))

    # decay = exp(-clamp(w*cum_dt + b, 0, 1000)): closer to 1 right after an
    # observation, decaying towards 0 (i.e. towards the empirical mean).
    zeros = torch.zeros([n_traj, n_tp, n_dims]).to(get_device(data))
    decay = torch.exp( - torch.min( torch.max(zeros,
        w_input_decay * cum_delta_ts + b_input_decay), zeros + 1000 ))

    data_means = torch.mean(data, 1).unsqueeze(1)

    # Observed entries pass through unchanged; missing ones get the mix.
    data_imputed = data * mask + (1-mask) * (decay * data_last_obsv + (1-decay) * data_means)
    return data_imputed
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def run_rnn(inputs, delta_ts, cell, first_hidden=None,
    mask = None, feed_previous=False, n_steps=0,
    decoder = None, input_decay_params = None,
    feed_previous_w_prob = 0.,
    masked_update = True):
    """Unroll an RNN cell over a (possibly masked) sequence.

    Args:
        inputs: (n_traj, n_tp, n_dims) input sequence.
        delta_ts: time gaps between consecutive steps, concatenated to
            each input (or replaced by cumulative deltas for GRUCellExpDecay).
        cell: the recurrent cell to step.
        first_hidden: optional initial hidden state; if given it is also
            emitted as the first output and one fewer step is run.
        mask: observation mask, concatenated to the input when present.
        feed_previous: always feed the decoded previous hidden state as
            the next input (autoregressive decoding); requires *decoder*.
        n_steps: number of steps to run (0 = inputs.size(1)).
        decoder: maps hidden state back to input space for feeding.
        input_decay_params: (w, b) enabling GRU-D input-space imputation.
        feed_previous_w_prob: scheduled-sampling probability of feeding
            the true input instead of the decoded one.
        masked_update: keep the previous hidden state at time points
            where no feature is observed.

    Returns:
        (last_hidden, all_hiddens) where all_hiddens has shape
        (1, n_traj, n_emitted_steps, hidden_dim).
    """
    if (feed_previous or feed_previous_w_prob) and decoder is None:
        raise Exception("feed_previous is set to True -- please specify RNN decoder")

    if n_steps == 0:
        n_steps = inputs.size(1)

    if (feed_previous or feed_previous_w_prob) and mask is None:
        mask = torch.ones((inputs.size(0), n_steps, inputs.size(-1))).to(get_device(inputs))

    if isinstance(cell, GRUCellExpDecay):
        cum_delta_ts = get_cum_delta_ts(inputs, delta_ts, mask)

    if input_decay_params is not None:
        w_input_decay, b_input_decay = input_decay_params
        inputs = impute_using_input_decay(inputs, delta_ts, mask,
            w_input_decay, b_input_decay)

    all_hiddens = []
    hidden = first_hidden

    if hidden is not None:
        all_hiddens.append(hidden)
        # The provided initial state counts as the first emitted step.
        n_steps -= 1

    for i in range(n_steps):
        delta_t = delta_ts[:,i]
        # Choose the next input: ground truth, decoded previous state, or
        # a scheduled-sampling mix of the two.
        if i == 0:
            rnn_input = inputs[:,i]
        elif feed_previous:
            rnn_input = decoder(hidden)
        elif feed_previous_w_prob > 0:
            feed_prev = np.random.uniform() > feed_previous_w_prob
            if feed_prev:
                rnn_input = decoder(hidden)
            else:
                rnn_input = inputs[:,i]
        else:
            rnn_input = inputs[:,i]

        if mask is not None:
            mask_i = mask[:,i,:]
            rnn_input = torch.cat((rnn_input, mask_i), -1)

        # Append the time feature expected by the cell: cumulative deltas
        # for the exp-decay cell, raw delta otherwise.
        if isinstance(cell, GRUCellExpDecay):
            cum_delta_t = cum_delta_ts[:,i]
            input_w_t = torch.cat((rnn_input, cum_delta_t), -1).squeeze(1)
        else:
            input_w_t = torch.cat((rnn_input, delta_t), -1).squeeze(1)

        prev_hidden = hidden
        hidden = cell(input_w_t, hidden)

        if masked_update and (mask is not None) and (prev_hidden is not None):
            # update only the hidden states for hidden state only if at least one feature is present for the current time point
            summed_mask = (torch.sum(mask_i, -1, keepdim = True) > 0).float()
            assert(not torch.isnan(summed_mask).any())
            hidden = summed_mask * hidden + (1-summed_mask) * prev_hidden

        all_hiddens.append(hidden)

    all_hiddens = torch.stack(all_hiddens, 0)
    # -> (1, n_traj, n_steps, hidden_dim)
    all_hiddens = all_hiddens.permute(1,0,2).unsqueeze(0)
    return hidden, all_hiddens
class Classic_RNN(Baseline):
    """Plain RNN baseline run directly in the data space.

    A single recurrent cell (a GRU, or a GRU-D-style cell with exponential
    hidden-state decay) consumes the observed values, their mask and the
    inter-observation time gap at every step; an MLP decoder maps each
    hidden state back to the input space.
    """
    def __init__(self, input_dim, latent_dim, device,
        concat_mask = False, obsrv_std = 0.1,
        use_binary_classif = False,
        linear_classifier = False,
        classif_per_tp = False,
        input_space_decay = False,
        cell = "gru", n_units = 100,
        n_labels = 1,
        train_classif_w_reconstr = False):
        super(Classic_RNN, self).__init__(input_dim, latent_dim, device,
            obsrv_std = obsrv_std,
            use_binary_classif = use_binary_classif,
            classif_per_tp = classif_per_tp,
            linear_classifier = linear_classifier,
            n_labels = n_labels,
            train_classif_w_reconstr = train_classif_w_reconstr)
        self.concat_mask = concat_mask
        encoder_dim = int(input_dim)
        # With the observation mask concatenated to the input, the RNN cell
        # sees twice as many input features.
        if concat_mask:
            encoder_dim = encoder_dim * 2
        # Maps a hidden state back into the data space.
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, n_units),
            nn.Tanh(),
            nn.Linear(n_units, input_dim),)
        #utils.init_network_weights(self.encoder)
        utils.init_network_weights(self.decoder)
        if cell == "gru":
            self.rnn_cell = GRUCell(encoder_dim + 1, latent_dim) # +1 for delta t
        elif cell == "expdecay":
            self.rnn_cell = GRUCellExpDecay(
                input_size = encoder_dim,
                input_size_for_decay = input_dim,
                hidden_size = latent_dim,
                device = device)
        else:
            raise Exception("Unknown RNN cell: {}".format(cell))
        if input_space_decay:
            # Learnable per-feature parameters used when decaying the inputs
            # over time (only active when input_space_decay is set).
            self.w_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
            self.b_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
        self.input_space_decay = input_space_decay
        # Identity: the last hidden state itself is used as z0.
        self.z0_net = lambda hidden_state: hidden_state
    def get_reconstruction(self, time_steps_to_predict, data, truth_time_steps,
        mask = None, n_traj_samples = 1, mode = None):
        """Run the RNN over `data` and decode a reconstruction at each step.

        data: [n_traj, n_tp, n_dims]; `time_steps_to_predict` must equal
        `truth_time_steps` (extrapolation is not supported). Returns
        (outputs, extra_info) where outputs has shape
        [n_traj_samples, n_traj, n_tp, n_dims].
        """
        assert(mask is not None)
        n_traj, n_tp, n_dims = data.size()
        if (len(truth_time_steps) != len(time_steps_to_predict)) or (torch.sum(time_steps_to_predict - truth_time_steps) != 0):
            raise Exception("Extrapolation mode not implemented for RNN models")
        # for classic RNN time_steps_to_predict should be the same as truth_time_steps
        assert(len(truth_time_steps) == len(time_steps_to_predict))
        batch_size = data.size(0)
        zero_delta_t = torch.Tensor([0.]).to(self.device)
        # Gaps between consecutive observation times; pad with 0 for the last step.
        delta_ts = truth_time_steps[1:] - truth_time_steps[:-1]
        delta_ts = torch.cat((delta_ts, zero_delta_t))
        if len(delta_ts.size()) == 1:
            # delta_ts are shared for all trajectories in a batch
            assert(data.size(1) == delta_ts.size(0))
            delta_ts = delta_ts.unsqueeze(-1).repeat((batch_size,1,1))
        input_decay_params = None
        if self.input_space_decay:
            input_decay_params = (self.w_input_decay, self.b_input_decay)
        if mask is not None:
            utils.check_mask(data, mask)
        # During classification training the true inputs are always fed;
        # otherwise the model is fed its own predictions half of the time.
        hidden_state, all_hiddens = run_rnn(data, delta_ts,
            cell = self.rnn_cell, mask = mask,
            input_decay_params = input_decay_params,
            feed_previous_w_prob = (0. if self.use_binary_classif else 0.5),
            decoder = self.decoder)
        outputs = self.decoder(all_hiddens)
        # Shift outputs for computing the loss -- we should compare the first output to the second data point, etc.
        first_point = data[:,0,:]
        outputs = utils.shift_outputs(outputs, first_point)
        # first_point triple mimics (mean, std, sample); std is 0 since the
        # hidden state is deterministic here.
        extra_info = {"first_point": (hidden_state.unsqueeze(0), 0.0, hidden_state.unsqueeze(0))}
        if self.use_binary_classif:
            if self.classif_per_tp:
                extra_info["label_predictions"] = self.classifier(all_hiddens)
            else:
                extra_info["label_predictions"] = self.classifier(hidden_state).reshape(1,-1)
        # outputs shape: [n_traj_samples, n_traj, n_tp, n_dims]
        return outputs, extra_info
class RNN_VAE(VAE_Baseline):
    """VAE baseline whose encoder and decoder are both RNNs over irregular
    time gaps.

    The encoder RNN (optionally run backwards over the observations)
    produces mean and std of q(z0 | x); the decoder RNN is seeded with a
    sample of z0 and fed its own predictions to generate the reconstruction.
    """
    def __init__(self, input_dim, latent_dim, rec_dims,
        z0_prior, device,
        concat_mask = False, obsrv_std = 0.1,
        input_space_decay = False,
        use_binary_classif = False,
        classif_per_tp =False,
        linear_classifier = False,
        cell = "gru", n_units = 100,
        n_labels = 1,
        train_classif_w_reconstr = False):
        super(RNN_VAE, self).__init__(
            input_dim = input_dim, latent_dim = latent_dim,
            z0_prior = z0_prior,
            device = device, obsrv_std = obsrv_std,
            use_binary_classif = use_binary_classif,
            classif_per_tp = classif_per_tp,
            linear_classifier = linear_classifier,
            n_labels = n_labels,
            train_classif_w_reconstr = train_classif_w_reconstr)
        self.concat_mask = concat_mask
        encoder_dim = int(input_dim)
        # With the mask concatenated, the cells see twice as many features.
        if concat_mask:
            encoder_dim = encoder_dim * 2
        if cell == "gru":
            self.rnn_cell_enc = GRUCell(encoder_dim + 1, rec_dims) # +1 for delta t
            self.rnn_cell_dec = GRUCell(encoder_dim + 1, latent_dim) # +1 for delta t
        elif cell == "expdecay":
            self.rnn_cell_enc = GRUCellExpDecay(
                input_size = encoder_dim,
                input_size_for_decay = input_dim,
                hidden_size = rec_dims,
                device = device)
            self.rnn_cell_dec = GRUCellExpDecay(
                input_size = encoder_dim,
                input_size_for_decay = input_dim,
                hidden_size = latent_dim,
                device = device)
        else:
            raise Exception("Unknown RNN cell: {}".format(cell))
        # Maps the final encoder hidden state to (mean, std) of q(z0 | x);
        # the output is split in half along the last dim downstream.
        self.z0_net = nn.Sequential(
            nn.Linear(rec_dims, n_units),
            nn.Tanh(),
            nn.Linear(n_units, latent_dim * 2),)
        utils.init_network_weights(self.z0_net)
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, n_units),
            nn.Tanh(),
            nn.Linear(n_units, input_dim),)
        #utils.init_network_weights(self.encoder)
        utils.init_network_weights(self.decoder)
        if input_space_decay:
            # Learnable per-feature input-decay parameters (only active when
            # input_space_decay is set).
            self.w_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
            self.b_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
        self.input_space_decay = input_space_decay
    def get_reconstruction(self, time_steps_to_predict, data, truth_time_steps,
        mask = None, n_traj_samples = 1, mode = None):
        """Encode `data` into q(z0|x), sample z0, and decode a trajectory at
        `time_steps_to_predict`.

        Returns (outputs, extra_info) where outputs has shape
        [n_traj_samples, n_traj, n_tp, n_dims] and extra_info carries the
        z0 posterior statistics (and label predictions when classifying).
        """
        assert(mask is not None)
        batch_size = data.size(0)
        zero_delta_t = torch.Tensor([0.]).to(self.device)
        # run encoder backwards
        # NOTE(review): runs backwards unless prediction starts strictly after
        # the observed window -- confirm this is the intended interpolation-vs-
        # extrapolation check.
        run_backwards = bool(time_steps_to_predict[0] < truth_time_steps[-1])
        if run_backwards:
            # Look at data in the reverse order: from later points to the first
            data = utils.reverse(data)
            mask = utils.reverse(mask)
        delta_ts = truth_time_steps[1:] - truth_time_steps[:-1]
        if run_backwards:
            # we are going backwards in time
            delta_ts = utils.reverse(delta_ts)
        # Pad with a zero gap for the final step.
        delta_ts = torch.cat((delta_ts, zero_delta_t))
        if len(delta_ts.size()) == 1:
            # delta_ts are shared for all trajectories in a batch
            assert(data.size(1) == delta_ts.size(0))
            delta_ts = delta_ts.unsqueeze(-1).repeat((batch_size,1,1))
        input_decay_params = None
        if self.input_space_decay:
            input_decay_params = (self.w_input_decay, self.b_input_decay)
        # Encoder pass: only the final hidden state is needed.
        hidden_state, _ = run_rnn(data, delta_ts,
            cell = self.rnn_cell_enc, mask = mask,
            input_decay_params = input_decay_params)
        z0_mean, z0_std = utils.split_last_dim(self.z0_net(hidden_state))
        z0_std = z0_std.abs()
        z0_sample = utils.sample_standard_gaussian(z0_mean, z0_std)
        # Decoder # # # # # # # # # # # # # # # # # # # #
        # Gaps between the prediction time points; zero gap for the first step.
        delta_ts = torch.cat((zero_delta_t, time_steps_to_predict[1:] - time_steps_to_predict[:-1]))
        if len(delta_ts.size()) == 1:
            delta_ts = delta_ts.unsqueeze(-1).repeat((batch_size,1,1))
        # Decoder RNN is seeded with z0 and fed its own decoded predictions.
        _, all_hiddens = run_rnn(data, delta_ts,
            cell = self.rnn_cell_dec,
            first_hidden = z0_sample, feed_previous = True,
            n_steps = time_steps_to_predict.size(0),
            decoder = self.decoder,
            input_decay_params = input_decay_params)
        outputs = self.decoder(all_hiddens)
        # Shift outputs for computing the loss -- we should compare the first output to the second data point, etc.
        first_point = data[:,0,:]
        outputs = utils.shift_outputs(outputs, first_point)
        extra_info = {"first_point": (z0_mean.unsqueeze(0), z0_std.unsqueeze(0), z0_sample.unsqueeze(0))}
        if self.use_binary_classif:
            if self.classif_per_tp:
                extra_info["label_predictions"] = self.classifier(all_hiddens)
            else:
                extra_info["label_predictions"] = self.classifier(z0_mean).reshape(1,-1)
        # outputs shape: [n_traj_samples, n_traj, n_tp, n_dims]
        return outputs, extra_info
| 14,730 | 32.177928 | 121 | py |
steer | steer-master/latent_ode/lib/create_latent_ode_model.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.latent_ode import LatentODE
from lib.encoder_decoder import *
from lib.diffeq_solver import DiffeqSolver
from torch.distributions.normal import Normal
from lib.ode_func import ODEFunc, ODEFunc_w_Poisson
#####################################################################################################
def create_LatentODE_model(args, input_dim, z0_prior, obsrv_std, device,
    classif_per_tp = False, n_labels = 1):
    """Assemble a LatentODE model from the parsed command-line config.

    args: namespace with latents, units, gen_layers, rec_layers, rec_dims,
        gru_units, poisson, classif, linear_classif, z0_encoder, dataset.
    input_dim: dimensionality of the observations.
    z0_prior: prior distribution over the initial latent state.
    obsrv_std: observation noise std used by the likelihood.
    Returns the model moved to `device`.
    Raises Exception for an unknown args.z0_encoder.
    """
    dim = args.latents
    if args.poisson:
        # Network mapping the latent state to Poisson rates, one per input dim.
        lambda_net = utils.create_net(dim, input_dim,
            n_layers = 1, n_units = args.units, nonlinear = nn.Tanh)
        # ODE function produces the gradient for latent state and for poisson rate
        ode_func_net = utils.create_net(dim * 2, args.latents * 2,
            n_layers = args.gen_layers, n_units = args.units, nonlinear = nn.Tanh)
        gen_ode_func = ODEFunc_w_Poisson(
            input_dim = input_dim,
            latent_dim = args.latents * 2,
            ode_func_net = ode_func_net,
            lambda_net = lambda_net,
            device = device).to(device)
    else:
        dim = args.latents
        ode_func_net = utils.create_net(dim, args.latents,
            n_layers = args.gen_layers, n_units = args.units, nonlinear = nn.Tanh)
        gen_ode_func = ODEFunc(
            input_dim = input_dim,
            latent_dim = args.latents,
            ode_func_net = ode_func_net,
            device = device).to(device)
    z0_diffeq_solver = None
    n_rec_dims = args.rec_dims
    enc_input_dim = int(input_dim) * 2 # we concatenate the mask
    gen_data_dim = input_dim
    z0_dim = args.latents
    if args.poisson:
        z0_dim += args.latents # predict the initial poisson rate
    if args.z0_encoder == "odernn":
        # Recognition network: ODE-RNN encoder with its own (cheap, Euler) solver.
        ode_func_net = utils.create_net(n_rec_dims, n_rec_dims,
            n_layers = args.rec_layers, n_units = args.units, nonlinear = nn.Tanh)
        rec_ode_func = ODEFunc(
            input_dim = enc_input_dim,
            latent_dim = n_rec_dims,
            ode_func_net = ode_func_net,
            device = device).to(device)
        z0_diffeq_solver = DiffeqSolver(enc_input_dim, rec_ode_func, "euler", args.latents,
            odeint_rtol = 1e-3, odeint_atol = 1e-4, device = device)
        encoder_z0 = Encoder_z0_ODE_RNN(n_rec_dims, enc_input_dim, z0_diffeq_solver,
            z0_dim = z0_dim, n_gru_units = args.gru_units, device = device).to(device)
    elif args.z0_encoder == "rnn":
        encoder_z0 = Encoder_z0_RNN(z0_dim, enc_input_dim,
            lstm_output_size = n_rec_dims, device = device).to(device)
    else:
        raise Exception("Unknown encoder for Latent ODE model: " + args.z0_encoder)
    decoder = Decoder(args.latents, gen_data_dim).to(device)
    # Generative solver uses the adaptive dopri5 method.
    diffeq_solver = DiffeqSolver(gen_data_dim, gen_ode_func, 'dopri5', args.latents,
        odeint_rtol = 1e-3, odeint_atol = 1e-4, device = device)
    model = LatentODE(
        input_dim = gen_data_dim,
        latent_dim = args.latents,
        encoder_z0 = encoder_z0,
        decoder = decoder,
        diffeq_solver = diffeq_solver,
        z0_prior = z0_prior,
        device = device,
        obsrv_std = obsrv_std,
        use_poisson_proc = args.poisson,
        use_binary_classif = args.classif,
        linear_classifier = args.linear_classif,
        classif_per_tp = classif_per_tp,
        n_labels = n_labels,
        train_classif_w_reconstr = (args.dataset == "physionet")
        ).to(device)
    return model
| 3,325 | 30.377358 | 101 | py |
steer | steer-master/latent_ode/lib/ode_rnn.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.nn.modules.rnn import GRUCell, LSTMCell, RNNCellBase
from torch.distributions.normal import Normal
from torch.distributions import Independent
from torch.nn.parameter import Parameter
from lib.base_models import Baseline
class ODE_RNN(Baseline):
    """ODE-RNN baseline: the hidden state evolves under an ODE between
    observations and is updated by a GRU at each observation; an MLP maps
    every hidden state back to the data space.
    """
    def __init__(self, input_dim, latent_dim, device = torch.device("cpu"),
        z0_diffeq_solver = None, n_gru_units = 100, n_units = 100,
        concat_mask = False, obsrv_std = 0.1, use_binary_classif = False,
        classif_per_tp = False, n_labels = 1, train_classif_w_reconstr = False):
        Baseline.__init__(self, input_dim, latent_dim, device = device,
            obsrv_std = obsrv_std, use_binary_classif = use_binary_classif,
            classif_per_tp = classif_per_tp,
            n_labels = n_labels,
            train_classif_w_reconstr = train_classif_w_reconstr)
        ode_rnn_encoder_dim = latent_dim
        self.ode_gru = Encoder_z0_ODE_RNN(
            latent_dim = ode_rnn_encoder_dim,
            input_dim = (input_dim) * 2, # input and the mask
            z0_diffeq_solver = z0_diffeq_solver,
            n_gru_units = n_gru_units,
            device = device).to(device)
        self.z0_diffeq_solver = z0_diffeq_solver
        # Maps each hidden state back into the data space.
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, n_units),
            nn.Tanh(),
            nn.Linear(n_units, input_dim),)
        utils.init_network_weights(self.decoder)
    def get_reconstruction(self, time_steps_to_predict, data, truth_time_steps,
        mask = None, n_traj_samples = None, mode = None):
        """Run the ODE-RNN over `data` and decode at every observed time.

        Only interpolation is supported: `time_steps_to_predict` must equal
        `truth_time_steps`. Returns (outputs, extra_info) with outputs of
        shape [n_traj_samples, n_traj, n_tp, n_dims].
        """
        if (len(truth_time_steps) != len(time_steps_to_predict)) or (torch.sum(time_steps_to_predict - truth_time_steps) != 0):
            raise Exception("Extrapolation mode not implemented for ODE-RNN")
        # time_steps_to_predict and truth_time_steps should be the same
        assert(len(truth_time_steps) == len(time_steps_to_predict))
        assert(mask is not None)
        data_and_mask = data
        if mask is not None:
            data_and_mask = torch.cat([data, mask],-1)
        _, _, latent_ys, _ = self.ode_gru.run_odernn(
            data_and_mask, truth_time_steps, run_backwards = False)
        latent_ys = latent_ys.permute(0,2,1,3)
        last_hidden = latent_ys[:,:,-1,:]
        #assert(torch.sum(int_lambda[0,0,-1,:] <= 0) == 0.)
        outputs = self.decoder(latent_ys)
        # Shift outputs for computing the loss -- we should compare the first output to the second data point, etc.
        first_point = data[:,0,:]
        outputs = utils.shift_outputs(outputs, first_point)
        # first_point triple mimics (mean, std, sample); std is 0 since the
        # hidden state is deterministic here.
        extra_info = {"first_point": (latent_ys[:,:,-1,:], 0.0, latent_ys[:,:,-1,:])}
        if self.use_binary_classif:
            if self.classif_per_tp:
                extra_info["label_predictions"] = self.classifier(latent_ys)
            else:
                extra_info["label_predictions"] = self.classifier(last_hidden).squeeze(-1)
        # outputs shape: [n_traj_samples, n_traj, n_tp, n_dims]
        return outputs, extra_info
| 3,133 | 31.309278 | 121 | py |
steer | steer-master/latent_ode/lib/plotting.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import matplotlib
# matplotlib.use('TkAgg')
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os
from scipy.stats import kde
import numpy as np
import subprocess
import torch
import lib.utils as utils
import matplotlib.gridspec as gridspec
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.rnn_baselines import *
from lib.ode_rnn import *
import torch.nn.functional as functional
from torch.distributions.normal import Normal
from lib.latent_ode import LatentODE
from lib.likelihood_eval import masked_gaussian_log_density
try:
import umap
except:
print("Couldn't import umap")
from generate_timeseries import Periodic_1d
from person_activity import PersonActivity
from lib.utils import compute_loss_all_batches
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
LARGE_SIZE = 22
def init_fonts(main_font_size = LARGE_SIZE):
    """Configure matplotlib font sizes for all figures.

    Default text, titles and the figure title use `main_font_size`; axis
    labels, tick labels and the legend are two points smaller.
    """
    secondary_size = main_font_size - 2
    rc_settings = (
        ('font', 'size', main_font_size),         # controls default text sizes
        ('axes', 'titlesize', main_font_size),    # fontsize of the axes title
        ('axes', 'labelsize', secondary_size),    # fontsize of the x and y labels
        ('xtick', 'labelsize', secondary_size),   # fontsize of the tick labels
        ('ytick', 'labelsize', secondary_size),   # fontsize of the tick labels
        ('legend', 'fontsize', secondary_size),   # legend fontsize
        ('figure', 'titlesize', main_font_size),  # fontsize of the figure title
    )
    for group, option, size in rc_settings:
        plt.rc(group, **{option: size})
def plot_trajectories(ax, traj, time_steps, min_y = None, max_y = None, title = "",
    add_to_plot = False, label = None, add_legend = False, dim_to_show = 0,
    linestyle = '-', marker = 'o', mask = None, color = None, linewidth = 1):
    """Draw one line per trajectory on `ax`.

    traj: tensor of shape [n_traj, n_timesteps, n_dims]; only `dim_to_show`
    is plotted. When `mask` is given, points whose mask entry is not 1 are
    dropped. Unless `add_to_plot`, the axes are cleared first.
    """
    if not add_to_plot:
        ax.cla()
    ax.set_title(title)
    ax.set_xlabel('Time')
    ax.set_ylabel('x')
    if min_y is not None:
        ax.set_ylim(bottom = min_y)
    if max_y is not None:
        ax.set_ylim(top = max_y)
    n_traj = traj.size()[0]
    for traj_idx in range(n_traj):
        values = traj[traj_idx].cpu().numpy()[:, dim_to_show]
        times = time_steps.cpu().numpy()
        if mask is not None:
            # Keep only the observed points of this trajectory.
            observed = mask[traj_idx].cpu().numpy()[:, dim_to_show] == 1
            values = values[observed]
            times = times[observed]
        ax.plot(times, values, linestyle = linestyle, label = label,
            marker = marker, color = color, linewidth = linewidth)
    if add_legend:
        ax.legend()
def plot_std(ax, traj, traj_std, time_steps, min_y = None, max_y = None, title = "",
    add_to_plot = False, label = None, alpha=0.2, color = None):
    """Shade the mean +/- std band for each trajectory on `ax`.

    Only the first data dimension is used. traj / traj_std have shape
    [n_traj, n_timesteps, n_dims].
    """
    times = time_steps.cpu().numpy()
    # take only the first (and only?) dimension
    lower_band = (traj - traj_std).cpu().numpy()[:, :, 0]
    upper_band = (traj + traj_std).cpu().numpy()[:, :, 0]
    for lower, upper in zip(lower_band, upper_band):
        ax.fill_between(times, lower, upper, alpha=alpha, color = color)
def plot_vector_field(ax, odefunc, latent_dim, device):
    """Streamplot of the ODE dynamics over a 2-D slice of the latent space.

    Evaluates `odefunc` on a 13x13 grid over [-6, 6]^2; when latent_dim > 2
    the remaining latent dimensions are zero-padded and only the first two
    components of the gradient are drawn.
    """
    # Code borrowed from https://github.com/rtqichen/ffjord/blob/29c016131b702b307ceb05c70c74c6e802bb8a44/diagnostics/viz_toy.py
    K = 13j
    # Imaginary step: np.mgrid yields 13 evenly spaced points incl. endpoints.
    y, x = np.mgrid[-6:6:K, -6:6:K]
    K = int(K.imag)
    zs = torch.from_numpy(np.stack([x, y], -1).reshape(K * K, 2)).to(device, torch.float32)
    if latent_dim > 2:
        # Plots dimensions 0 and 2
        # NOTE(review): the grid actually occupies latent dims 0 and 1; the
        # remaining dims are zero-padded -- comment above may be stale.
        zs = torch.cat((zs, torch.zeros(K * K, latent_dim-2).to(device) ), 1)
    dydt = odefunc(0, zs)
    # NOTE(review): the field is negated before plotting -- confirm this
    # sign convention is intentional.
    dydt = -dydt.cpu().detach().numpy()
    if latent_dim > 2:
        dydt = dydt[:,:2]
    # Normalize arrows to unit length so the plot shows direction only.
    mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
    dydt = (dydt / mag)
    dydt = dydt.reshape(K, K, 2)
    ax.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], #color = dydt[:, :, 0],
        cmap="coolwarm", linewidth=2)
    # ax.quiver(
    # x, y, dydt[:, :, 0], dydt[:, :, 1],
    # np.exp(logmag), cmap="coolwarm", pivot="mid", scale = 100,
    # )
    ax.set_xlim(-6, 6)
    ax.set_ylim(-6, 6)
    #ax.axis("off")
def get_meshgrid(npts, int_y1, int_y2):
    """Build an npts x npts grid over the rectangle int_y1 x int_y2.

    int_y1, int_y2: (min, max) intervals for the two coordinates.
    Returns (xx, yy, flat_inputs): the numpy meshgrid arrays plus a float32
    torch tensor of shape [npts**2, 2] listing every (y1, y2) grid point.
    """
    y1_lo, y1_hi = int_y1
    y2_lo, y2_hi = int_y2
    xx, yy = np.meshgrid(np.linspace(y1_lo, y1_hi, npts),
                         np.linspace(y2_lo, y2_hi, npts))
    # One row per grid point: column 0 is y1, column 1 is y2.
    grid_points = np.stack((xx.ravel(), yy.ravel()), axis=1)
    flat_inputs = torch.from_numpy(grid_points).float()
    return xx, yy, flat_inputs
def add_white(cmap):
    """Return a copy of `cmap` whose first color entry is opaque white.

    Useful so that zero-density regions of a contour plot render as white
    background rather than the colormap's darkest/lightest color.
    """
    colors = [cmap(idx) for idx in range(cmap.N)]
    colors[0] = (1., 1., 1., 1.0)  # replace the lowest entry with white
    return cmap.from_list('Custom cmap', colors, cmap.N)
class Visualizations():
    """Builds and updates the diagnostic figure for Latent ODE training:
    per-trajectory reconstructions, samples from the prior, a vector-field
    slice, and latent trajectories.
    """
    def __init__(self, device):
        self.init_visualization()
        init_fonts(SMALL_SIZE)
        self.device = device
    def init_visualization(self):
        """Create the figure, its six subplot axes and the axis-limit cache."""
        self.fig = plt.figure(figsize=(12, 7), facecolor='white')
        # Top row: three reconstruction panels.
        self.ax_traj = []
        for i in range(1,4):
            self.ax_traj.append(self.fig.add_subplot(2,3,i, frameon=False))
        # self.ax_density = []
        # for i in range(4,7):
        # self.ax_density.append(self.fig.add_subplot(3,3,i, frameon=False))
        #self.ax_samples_same_traj = self.fig.add_subplot(3,3,7, frameon=False)
        # Bottom row: latent trajectories, vector field, prior samples.
        self.ax_latent_traj = self.fig.add_subplot(2,3,4, frameon=False)
        self.ax_vector_field = self.fig.add_subplot(2,3,5, frameon=False)
        self.ax_traj_from_prior = self.fig.add_subplot(2,3,6, frameon=False)
        self.plot_limits = {}
        plt.show(block=False)
    def set_plot_lims(self, ax, name):
        """Freeze axis limits: the first call records them under `name`;
        later calls re-apply the recorded limits so panels stay comparable
        across epochs."""
        if name not in self.plot_limits:
            self.plot_limits[name] = (ax.get_xlim(), ax.get_ylim())
            return
        xlim, ylim = self.plot_limits[name]
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    def draw_one_density_plot(self, ax, model, data_dict, traj_id,
        multiply_by_poisson = False):
        """Contour the joint density p(x, z0) (blue) and the approximate
        posterior q(z0 | x) (red) over a 2-D grid of z0 values for one
        trajectory `traj_id`."""
        scale = 5
        cmap = add_white(plt.cm.get_cmap('Blues', 9)) # plt.cm.BuGn_r
        cmap2 = add_white(plt.cm.get_cmap('Reds', 9)) # plt.cm.BuGn_r
        #cmap = plt.cm.get_cmap('viridis')
        data = data_dict["data_to_predict"]
        time_steps = data_dict["tp_to_predict"]
        mask = data_dict["mask_predicted_data"]
        observed_data = data_dict["observed_data"]
        observed_time_steps = data_dict["observed_tp"]
        observed_mask = data_dict["observed_mask"]
        npts = 50
        xx, yy, z0_grid = get_meshgrid(npts = npts, int_y1 = (-scale,scale), int_y2 = (-scale,scale))
        z0_grid = z0_grid.to(get_device(data))
        if model.latent_dim > 2:
            # Zero-pad the remaining latent dimensions.
            z0_grid = torch.cat((z0_grid, torch.zeros(z0_grid.size(0), model.latent_dim-2)), 1)
        if model.use_poisson_proc:
            n_traj, n_dims = z0_grid.size()
            # append a vector of zeros to compute the integral of lambda and also zeros for the first point of lambda
            zeros = torch.zeros([n_traj, model.input_dim + model.latent_dim]).to(get_device(data))
            z0_grid_aug = torch.cat((z0_grid, zeros), -1)
        else:
            z0_grid_aug = z0_grid
        # Shape of sol_y [n_traj_samples, n_samples, n_timepoints, n_latents]
        sol_y = model.diffeq_solver(z0_grid_aug.unsqueeze(0), time_steps)
        if model.use_poisson_proc:
            sol_y, log_lambda_y, int_lambda, _ = model.diffeq_solver.ode_func.extract_poisson_rate(sol_y)
            assert(torch.sum(int_lambda[:,:,0,:]) == 0.)
            assert(torch.sum(int_lambda[0,0,-1,:] <= 0) == 0.)
        pred_x = model.decoder(sol_y)
        # Plot density for one trajectory
        one_traj = data[traj_id]
        mask_one_traj = None
        if mask is not None:
            mask_one_traj = mask[traj_id].unsqueeze(0)
            mask_one_traj = mask_one_traj.repeat(npts**2,1,1).unsqueeze(0)
        ax.cla()
        # Plot: prior
        prior_density_grid = model.z0_prior.log_prob(z0_grid.unsqueeze(0)).squeeze(0)
        # Sum the density over two dimensions
        prior_density_grid = torch.sum(prior_density_grid, -1)
        # =================================================
        # Plot: p(x | y(t0))
        masked_gaussian_log_density_grid = masked_gaussian_log_density(pred_x,
            one_traj.repeat(npts**2,1,1).unsqueeze(0),
            mask = mask_one_traj,
            obsrv_std = model.obsrv_std).squeeze(-1)
        # Plot p(t | y(t0))
        if model.use_poisson_proc:
            poisson_info = {}
            poisson_info["int_lambda"] = int_lambda[:,:,-1,:]
            poisson_info["log_lambda_y"] = log_lambda_y
            # NOTE(review): compute_poisson_proc_likelihood is not imported in
            # this module (only masked_gaussian_log_density is) -- this branch
            # would raise NameError; verify before enabling the poisson path.
            poisson_log_density_grid = compute_poisson_proc_likelihood(
                one_traj.repeat(npts**2,1,1).unsqueeze(0),
                pred_x, poisson_info, mask = mask_one_traj)
            poisson_log_density_grid = poisson_log_density_grid.squeeze(0)
        # =================================================
        # Plot: p(x , y(t0))
        log_joint_density = prior_density_grid + masked_gaussian_log_density_grid
        if multiply_by_poisson:
            log_joint_density = log_joint_density + poisson_log_density_grid
        density_grid = torch.exp(log_joint_density)
        density_grid = torch.reshape(density_grid, (xx.shape[0], xx.shape[1]))
        density_grid = density_grid.cpu().numpy()
        ax.contourf(xx, yy, density_grid, cmap=cmap, alpha=1)
        # =================================================
        # Plot: q(y(t0)| x)
        #self.ax_density.set_title("Red: q(y(t0) | x) Blue: p(x, y(t0))")
        ax.set_xlabel('z1(t0)')
        ax.set_ylabel('z2(t0)')
        data_w_mask = observed_data[traj_id].unsqueeze(0)
        if observed_mask is not None:
            data_w_mask = torch.cat((data_w_mask, observed_mask[traj_id].unsqueeze(0)), -1)
        z0_mu, z0_std = model.encoder_z0(
            data_w_mask, observed_time_steps)
        if model.use_poisson_proc:
            z0_mu = z0_mu[:, :, :model.latent_dim]
            z0_std = z0_std[:, :, :model.latent_dim]
        q_z0 = Normal(z0_mu, z0_std)
        q_density_grid = q_z0.log_prob(z0_grid)
        # Sum the density over two dimensions
        q_density_grid = torch.sum(q_density_grid, -1)
        density_grid = torch.exp(q_density_grid)
        density_grid = torch.reshape(density_grid, (xx.shape[0], xx.shape[1]))
        density_grid = density_grid.cpu().numpy()
        ax.contourf(xx, yy, density_grid, cmap=cmap2, alpha=0.3)
    def draw_all_plots_one_dim(self, data_dict, model,
        plot_name = "", save = False, experimentID = 0.):
        """Redraw the whole diagnostic figure for the first dimension of the
        data: three reconstruction panels, prior samples (LatentODE only),
        vector-field slice, and latent trajectories. Saves the figure under
        plots/<experimentID>/<plot_name> when `save` is set."""
        data = data_dict["data_to_predict"]
        time_steps = data_dict["tp_to_predict"]
        mask = data_dict["mask_predicted_data"]
        observed_data = data_dict["observed_data"]
        observed_time_steps = data_dict["observed_tp"]
        observed_mask = data_dict["observed_mask"]
        device = get_device(time_steps)
        time_steps_to_predict = time_steps
        if isinstance(model, LatentODE):
            # sample at the original time points
            time_steps_to_predict = utils.linspace_vector(time_steps[0], time_steps[-1], 100).to(device)
        reconstructions, info = model.get_reconstruction(time_steps_to_predict,
            observed_data, observed_time_steps, mask = observed_mask, n_traj_samples = 10)
        n_traj_to_show = 3
        # plot only 10 trajectories
        data_for_plotting = observed_data[:n_traj_to_show]
        mask_for_plotting = observed_mask[:n_traj_to_show]
        # Mean and std over the 10 posterior samples.
        reconstructions_for_plotting = reconstructions.mean(dim=0)[:n_traj_to_show]
        reconstr_std = reconstructions.std(dim=0)[:n_traj_to_show]
        dim_to_show = 0
        # Shared y-limits across all panels.
        max_y = max(
            data_for_plotting[:,:,dim_to_show].cpu().numpy().max(),
            reconstructions[:,:,dim_to_show].cpu().numpy().max())
        min_y = min(
            data_for_plotting[:,:,dim_to_show].cpu().numpy().min(),
            reconstructions[:,:,dim_to_show].cpu().numpy().min())
        ############################################
        # Plot reconstructions, true postrior and approximate posterior
        cmap = plt.cm.get_cmap('Set1')
        for traj_id in range(3):
            # Plot observations
            plot_trajectories(self.ax_traj[traj_id],
                data_for_plotting[traj_id].unsqueeze(0), observed_time_steps,
                mask = mask_for_plotting[traj_id].unsqueeze(0),
                min_y = min_y, max_y = max_y, #title="True trajectories",
                marker = 'o', linestyle='', dim_to_show = dim_to_show,
                color = cmap(2))
            # Plot reconstructions
            plot_trajectories(self.ax_traj[traj_id],
                reconstructions_for_plotting[traj_id].unsqueeze(0), time_steps_to_predict,
                min_y = min_y, max_y = max_y, title="Sample {} (data space)".format(traj_id), dim_to_show = dim_to_show,
                add_to_plot = True, marker = '', color = cmap(3), linewidth = 3)
            # Plot variance estimated over multiple samples from approx posterior
            plot_std(self.ax_traj[traj_id],
                reconstructions_for_plotting[traj_id].unsqueeze(0), reconstr_std[traj_id].unsqueeze(0),
                time_steps_to_predict, alpha=0.5, color = cmap(3))
            self.set_plot_lims(self.ax_traj[traj_id], "traj_" + str(traj_id))
            # Plot true posterior and approximate posterior
            # self.draw_one_density_plot(self.ax_density[traj_id],
            # model, data_dict, traj_id = traj_id,
            # multiply_by_poisson = False)
            # self.set_plot_lims(self.ax_density[traj_id], "density_" + str(traj_id))
            # self.ax_density[traj_id].set_title("Sample {}: p(z0) and q(z0 | x)".format(traj_id))
        ############################################
        # Get several samples for the same trajectory
        # one_traj = data_for_plotting[:1]
        # first_point = one_traj[:,0]
        # samples_same_traj, _ = model.get_reconstruction(time_steps_to_predict,
        # observed_data[:1], observed_time_steps, mask = observed_mask[:1], n_traj_samples = 5)
        # samples_same_traj = samples_same_traj.squeeze(1)
        # plot_trajectories(self.ax_samples_same_traj, samples_same_traj, time_steps_to_predict, marker = '')
        # plot_trajectories(self.ax_samples_same_traj, one_traj, time_steps, linestyle = "",
        # label = "True traj", add_to_plot = True, title="Reconstructions for the same trajectory (data space)")
        ############################################
        # Plot trajectories from prior
        if isinstance(model, LatentODE):
            # Fixed seed so prior samples are comparable across epochs.
            torch.manual_seed(1991)
            np.random.seed(1991)
            traj_from_prior = model.sample_traj_from_prior(time_steps_to_predict, n_traj_samples = 3)
            # Since in this case n_traj = 1, n_traj_samples -- requested number of samples from the prior, squeeze n_traj dimension
            traj_from_prior = traj_from_prior.squeeze(1)
            plot_trajectories(self.ax_traj_from_prior, traj_from_prior, time_steps_to_predict,
                marker = '', linewidth = 3)
            self.ax_traj_from_prior.set_title("Samples from prior (data space)", pad = 20)
            #self.set_plot_lims(self.ax_traj_from_prior, "traj_from_prior")
        ################################################
        # Plot z0
        # first_point_mu, first_point_std, first_point_enc = info["first_point"]
        # dim1 = 0
        # dim2 = 1
        # self.ax_z0.cla()
        # # first_point_enc shape: [1, n_traj, n_dims]
        # self.ax_z0.scatter(first_point_enc.cpu()[0,:,dim1], first_point_enc.cpu()[0,:,dim2])
        # self.ax_z0.set_title("Encodings z0 of all test trajectories (latent space)")
        # self.ax_z0.set_xlabel('dim {}'.format(dim1))
        # self.ax_z0.set_ylabel('dim {}'.format(dim2))
        ################################################
        # Show vector field
        self.ax_vector_field.cla()
        plot_vector_field(self.ax_vector_field, model.diffeq_solver.ode_func, model.latent_dim, device)
        self.ax_vector_field.set_title("Slice of vector field (latent space)", pad = 20)
        self.set_plot_lims(self.ax_vector_field, "vector_field")
        #self.ax_vector_field.set_ylim((-0.5, 1.5))
        ################################################
        # Plot trajectories in the latent space
        # shape before [1, n_traj, n_tp, n_latent_dims]
        # Take only the first sample from approx posterior
        latent_traj = info["latent_traj"][0,:n_traj_to_show]
        # shape before permute: [1, n_tp, n_latent_dims]
        self.ax_latent_traj.cla()
        cmap = plt.cm.get_cmap('Accent')
        n_latent_dims = latent_traj.size(-1)
        custom_labels = {}
        for i in range(n_latent_dims):
            col = cmap(i)
            plot_trajectories(self.ax_latent_traj, latent_traj, time_steps_to_predict,
                title="Latent trajectories z(t) (latent space)", dim_to_show = i, color = col,
                marker = '', add_to_plot = True,
                linewidth = 3)
            custom_labels['dim ' + str(i)] = Line2D([0], [0], color=col)
        self.ax_latent_traj.set_ylabel("z")
        self.ax_latent_traj.set_title("Latent trajectories z(t) (latent space)", pad = 20)
        self.ax_latent_traj.legend(custom_labels.values(), custom_labels.keys(), loc = 'lower left')
        self.set_plot_lims(self.ax_latent_traj, "latent_traj")
        ################################################
        self.fig.tight_layout()
        plt.draw()
        if save:
            dirname = "plots/" + str(experimentID) + "/"
            os.makedirs(dirname, exist_ok=True)
            self.fig.savefig(dirname + plot_name)
| 16,053 | 33.673866 | 125 | py |
steer | steer-master/latent_ode/lib/diffeq_solver.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import time
import numpy as np
import torch
import torch.nn as nn
import lib.utils as utils
from torch.distributions.multivariate_normal import MultivariateNormal
# git clone https://github.com/rtqichen/torchdiffeq.git
#from torchdiffeq import odeint as odeint
from torchdiffeq import odeint_stochastic_end_v3 as odeint
#from torchdiffeq import odeint_stochastic_end_v2 as odeint
#from torchdiffeq import odeint_stochastic_end_v2_inference as odeint_inference
#####################################################################################################
class DiffeqSolver(nn.Module):
    """Wraps an ODE function and an `odeint` call into an nn.Module.

    Integrates `ode_func` from a batch of initial points over the requested
    time points and returns the solution with the batch dimensions first.
    """
    def __init__(self, input_dim, ode_func, method, latents,
            odeint_rtol=1e-4, odeint_atol=1e-5, device=torch.device("cpu")):
        super(DiffeqSolver, self).__init__()
        self.ode_method = method
        self.latents = latents
        self.device = device
        self.ode_func = ode_func
        self.odeint_rtol = odeint_rtol
        self.odeint_atol = odeint_atol
    def forward(self, first_point, time_steps_to_predict, backwards=False):
        """Decode the trajectory through the ODE solver.

        first_point: [n_traj_samples, n_traj, n_dims] initial states.
        time_steps_to_predict: 1-D tensor of time points to evaluate at.
        Returns pred_y of shape [n_traj_samples, n_traj, n_tp, n_dims].
        """
        n_traj_samples, n_traj = first_point.size()[0], first_point.size()[1]
        # BUG FIX: the original branched on `time_steps_to_predict.size()==2`,
        # which compares a torch.Size to an int and is therefore always False;
        # both branches issued the identical call anyway, so they are collapsed
        # into a single odeint call.
        pred_y = odeint(self.ode_func, first_point, time_steps_to_predict,
            rtol=self.odeint_rtol, atol=self.odeint_atol,
            method=self.ode_method, min_length=0.001)
        # odeint returns [n_tp, n_traj_samples, n_traj, n_dims]; move time to dim 2.
        pred_y = pred_y.permute(1, 2, 0, 3)
        # Sanity checks: the solution must start at the initial point and keep
        # the batch dimensions. Mean absolute deviation is used because a
        # signed mean could cancel out large errors.
        assert(torch.mean(torch.abs(pred_y[:, :, 0, :] - first_point)) < 0.001)
        assert(pred_y.size()[0] == n_traj_samples)
        assert(pred_y.size()[1] == n_traj)
        return pred_y
    def sample_traj_from_prior(self, starting_point_enc, time_steps_to_predict,
            n_traj_samples=1):
        """
        # Decode the trajectory through ODE Solver using samples from the prior

        time_steps_to_predict: time steps at which we want to sample the new trajectory
        """
        func = self.ode_func.sample_next_point_from_prior
        pred_y = odeint(func, starting_point_enc, time_steps_to_predict,
            rtol=self.odeint_rtol, atol=self.odeint_atol, method=self.ode_method)
        # shape: [n_traj_samples, n_traj, n_tp, n_dim]
        pred_y = pred_y.permute(1, 2, 0, 3)
        return pred_y
| 2,845 | 37.986301 | 126 | py |
steer | steer-master/latent_ode/lib/ode_func.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.spectral_norm import spectral_norm
import lib.utils as utils
#####################################################################################################
class ODEFunc(nn.Module):
    """Neural ODE dynamics: dy/dt is computed by a feed-forward network of y.

    The dynamics are autonomous -- the time argument required by the ODE
    solver interface is accepted but not used by the network.
    """
    def __init__(self, input_dim, latent_dim, ode_func_net, device = torch.device("cpu")):
        """
        input_dim: dimensionality of the input
        latent_dim: dimensionality used for ODE. Analog of a continous latent state
        ode_func_net: network mapping a latent state to its time derivative
        """
        super(ODEFunc, self).__init__()
        self.input_dim = input_dim
        self.device = device
        # Re-initialize the supplied network's weights before adopting it.
        utils.init_network_weights(ode_func_net)
        self.gradient_net = ode_func_net
    def forward(self, t_local, y, backwards = False):
        """Return dy/dt at time `t_local`, negated when integrating backwards.

        t_local: current time point
        y: value at the current time point
        """
        gradient = self.get_ode_gradient_nn(t_local, y)
        return -gradient if backwards else gradient
    def get_ode_gradient_nn(self, t_local, y):
        # Autonomous dynamics: t_local is ignored by the network.
        return self.gradient_net(y)
    def sample_next_point_from_prior(self, t_local, y):
        """Gradient used when sampling trajectories from the prior.

        t_local: current time point
        y: value at the current time point
        """
        return self.get_ode_gradient_nn(t_local, y)
#####################################################################################################
class ODEFunc_w_Poisson(ODEFunc):
    """ODE dynamics augmented with a Poisson-process intensity over observations.

    The augmented state is [latent y | latent for lambda | running integral of
    lambda], so the ODE solver integrates both the latent trajectory and the
    cumulative Poisson rate at once.
    """

    def __init__(self, input_dim, latent_dim, ode_func_net,
        lambda_net, device = torch.device("cpu")):
        """
        input_dim: dimensionality of the input
        latent_dim: dimensionality used for ODE. Analog of a continous latent state
        lambda_net: maps the lambda-part of the latent state to log-intensities
        """
        super(ODEFunc_w_Poisson, self).__init__(input_dim, latent_dim, ode_func_net, device)

        self.latent_ode = ODEFunc(input_dim = input_dim,
            latent_dim = latent_dim,
            ode_func_net = ode_func_net,
            device = device)

        self.latent_dim = latent_dim
        self.lambda_net = lambda_net
        # The computation of poisson likelihood can become numerically unstable.
        # The integral lambda(t) dt can take large values. In fact, it is equal
        # to the expected number of events on the interval [0,T].
        # Exponent of lambda can also take large values.
        # So we divide lambda by the constant and then multiply the integral of
        # lambda by the constant.
        self.const_for_lambda = torch.Tensor([100.]).to(device)

    def extract_poisson_rate(self, augmented, final_result = True):
        """Split the augmented state into (y, log_lambdas, int_lambda, y_latent_lam).

        augmented: 3-D or 4-D tensor whose last dim is latent_dim + input_dim.
        final_result: when True, rescale the integral by const_for_lambda
            (only once the solver has finished; not during gradient evaluation).
        """
        y, log_lambdas, int_lambda = None, None, None

        assert(augmented.size(-1) == self.latent_dim + self.input_dim)
        # Half of the latent dims parameterize the Poisson rate.
        latent_lam_dim = self.latent_dim // 2

        if len(augmented.size()) == 3:
            # [n_traj, n_tp, dims]: trailing input_dim dims hold the integral.
            int_lambda = augmented[:,:,-self.input_dim:]
            y_latent_lam = augmented[:,:,:-self.input_dim]

            log_lambdas = self.lambda_net(y_latent_lam[:,:,-latent_lam_dim:])
            y = y_latent_lam[:,:,:-latent_lam_dim]

        elif len(augmented.size()) == 4:
            # [n_traj_samples, n_traj, n_tp, dims]: same split, one rank higher.
            int_lambda = augmented[:,:,:,-self.input_dim:]
            y_latent_lam = augmented[:,:,:,:-self.input_dim]

            log_lambdas = self.lambda_net(y_latent_lam[:,:,:,-latent_lam_dim:])
            y = y_latent_lam[:,:,:,:-latent_lam_dim]

        # Multiply the intergral over lambda by a constant
        # only when we have finished the integral computation (i.e. this is not
        # a call in get_ode_gradient_nn)
        if final_result:
            int_lambda = int_lambda * self.const_for_lambda

        # Latents for performing reconstruction (y) have the same size as latent
        # poisson rate (log_lambdas)
        assert(y.size(-1) == latent_lam_dim)

        return y, log_lambdas, int_lambda, y_latent_lam

    def get_ode_gradient_nn(self, t_local, augmented):
        # Dynamics of the augmented state: latent part evolves via the inner
        # ODE; the integral part grows at rate exp(log lambda) (rescaled).
        y, log_lam, int_lambda, y_latent_lam = self.extract_poisson_rate(augmented, final_result = False)
        dydt_dldt = self.latent_ode(t_local, y_latent_lam)

        # Divide lambda by the stabilizing constant (in log space).
        log_lam = log_lam - torch.log(self.const_for_lambda)
        return torch.cat((dydt_dldt, torch.exp(log_lam)),-1)
| 3,935 | 32.641026 | 135 | py |
steer | steer-master/latent_ode/lib/latent_ode.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import sklearn as sk
import numpy as np
#import gc
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence, Independent
from lib.base_models import VAE_Baseline
class LatentODE(VAE_Baseline):
    """Latent ODE VAE: an ODE-RNN/RNN encoder produces z0, an ODE solver
    evolves it over the prediction times, and a decoder maps latents to data.
    Optionally augments the latent state with a Poisson observation process
    and/or a classifier head.
    """

    def __init__(self, input_dim, latent_dim, encoder_z0, decoder, diffeq_solver,
        z0_prior, device, obsrv_std = None,
        use_binary_classif = False, use_poisson_proc = False,
        linear_classifier = False,
        classif_per_tp = False,
        n_labels = 1,
        train_classif_w_reconstr = False):

        super(LatentODE, self).__init__(
            input_dim = input_dim, latent_dim = latent_dim,
            z0_prior = z0_prior,
            device = device, obsrv_std = obsrv_std,
            use_binary_classif = use_binary_classif,
            classif_per_tp = classif_per_tp,
            linear_classifier = linear_classifier,
            use_poisson_proc = use_poisson_proc,
            n_labels = n_labels,
            train_classif_w_reconstr = train_classif_w_reconstr)

        self.encoder_z0 = encoder_z0
        self.diffeq_solver = diffeq_solver
        self.decoder = decoder
        self.use_poisson_proc = use_poisson_proc

    def get_reconstruction(self, time_steps_to_predict, truth, truth_time_steps,
        mask = None, n_traj_samples = 1, run_backwards = True, mode = None):
        """Encode (truth, truth_time_steps) to z0, sample n_traj_samples latents,
        integrate them over time_steps_to_predict and decode.

        Returns (pred_x, all_extra_info) where all_extra_info carries the z0
        statistics, the latent trajectory, and optional Poisson/classifier outputs.
        """
        if isinstance(self.encoder_z0, Encoder_z0_ODE_RNN) or \
            isinstance(self.encoder_z0, Encoder_z0_RNN):

            # Encoder consumes data concatenated with its mask (when given).
            truth_w_mask = truth
            if mask is not None:
                truth_w_mask = torch.cat((truth, mask), -1)
            first_point_mu, first_point_std = self.encoder_z0(
                truth_w_mask, truth_time_steps, run_backwards = run_backwards)

            # Reparameterized samples of z0, one set per trajectory sample.
            means_z0 = first_point_mu.repeat(n_traj_samples, 1, 1)
            sigma_z0 = first_point_std.repeat(n_traj_samples, 1, 1)
            first_point_enc = utils.sample_standard_gaussian(means_z0, sigma_z0)

        else:
            raise Exception("Unknown encoder type {}".format(type(self.encoder_z0).__name__))

        first_point_std = first_point_std.abs()
        assert(torch.sum(first_point_std < 0) == 0.)

        if self.use_poisson_proc:
            n_traj_samples, n_traj, n_dims = first_point_enc.size()
            # append a vector of zeros to compute the integral of lambda
            zeros = torch.zeros([n_traj_samples, n_traj,self.input_dim]).to(get_device(truth))
            first_point_enc_aug = torch.cat((first_point_enc, zeros), -1)
            means_z0_aug = torch.cat((means_z0, zeros), -1)
        else:
            first_point_enc_aug = first_point_enc
            means_z0_aug = means_z0

        assert(not torch.isnan(time_steps_to_predict).any())
        assert(not torch.isnan(first_point_enc).any())
        assert(not torch.isnan(first_point_enc_aug).any())

        # Shape of sol_y [n_traj_samples, n_samples, n_timepoints, n_latents]
        sol_y = self.diffeq_solver(first_point_enc_aug, time_steps_to_predict)

        if self.use_poisson_proc:
            # Separate the latent trajectory from the Poisson-rate channels.
            sol_y, log_lambda_y, int_lambda, _ = self.diffeq_solver.ode_func.extract_poisson_rate(sol_y)

            # Integral must start at zero and be positive at the final time.
            assert(torch.sum(int_lambda[:,:,0,:]) == 0.)
            assert(torch.sum(int_lambda[0,0,-1,:] <= 0) == 0.)

        pred_x = self.decoder(sol_y)

        all_extra_info = {
            "first_point": (first_point_mu, first_point_std, first_point_enc),
            "latent_traj": sol_y.detach()
        }

        if self.use_poisson_proc:
            # intergral of lambda from the last step of ODE Solver
            all_extra_info["int_lambda"] = int_lambda[:,:,-1,:]
            all_extra_info["log_lambda_y"] = log_lambda_y

        if self.use_binary_classif:
            if self.classif_per_tp:
                all_extra_info["label_predictions"] = self.classifier(sol_y)
            else:
                all_extra_info["label_predictions"] = self.classifier(first_point_enc).squeeze(-1)

        return pred_x, all_extra_info

    def sample_traj_from_prior(self, time_steps_to_predict, n_traj_samples = 1):
        """Decode trajectories whose z0 is sampled from the prior (no encoder)."""
        # Sample z0 from prior
        starting_point_enc = self.z0_prior.sample([n_traj_samples, 1, self.latent_dim]).squeeze(-1)

        starting_point_enc_aug = starting_point_enc
        if self.use_poisson_proc:
            n_traj_samples, n_traj, n_dims = starting_point_enc.size()
            # append a vector of zeros to compute the integral of lambda
            zeros = torch.zeros(n_traj_samples, n_traj,self.input_dim).to(self.device)
            starting_point_enc_aug = torch.cat((starting_point_enc, zeros), -1)

        sol_y = self.diffeq_solver.sample_traj_from_prior(starting_point_enc_aug, time_steps_to_predict,
            n_traj_samples = 3)

        if self.use_poisson_proc:
            sol_y, log_lambda_y, int_lambda, _ = self.diffeq_solver.ode_func.extract_poisson_rate(sol_y)

        return self.decoder(sol_y)
| 4,826 | 33.478571 | 99 | py |
steer | steer-master/latent_ode/lib/likelihood_eval.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import gc
import numpy as np
import sklearn as sk
import numpy as np
#import gc
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence, Independent
def gaussian_log_likelihood(mu_2d, data_2d, obsrv_std, indices = None):
    """Per-point-averaged Gaussian log-density of data_2d under N(mu_2d, obsrv_std).

    Sums the log-density over the last dimension, then divides by the number
    of points; returns a zero scalar when the input is empty.
    """
    n_data_points = mu_2d.size()[-1]
    if n_data_points == 0:
        # No observations: contribute a zero scalar on the right device.
        return torch.zeros([1]).to(get_device(data_2d)).squeeze()
    dist = Independent(Normal(loc = mu_2d, scale = obsrv_std.repeat(n_data_points)), 1)
    return dist.log_prob(data_2d) / n_data_points
def poisson_log_likelihood(masked_log_lambdas, masked_data, indices, int_lambdas):
    """Poisson-process log-likelihood: sum of log-intensities at the observed
    points minus the intensity integral for this (sample, traj, dim) index.
    Returns a zero scalar when there are no observations.
    """
    n_data_points = masked_data.size()[-1]
    if n_data_points == 0:
        return torch.zeros([1]).to(get_device(masked_data)).squeeze()
    # log p = sum_t log lambda(t_obs) - int lambda(t) dt
    return torch.sum(masked_log_lambdas) - int_lambdas[indices]
def compute_binary_CE_loss(label_predictions, mortality_label):
    """Binary cross-entropy (with logits) for mortality classification.

    label_predictions: logits, shape [n_traj] or [n_traj_samples, n_traj].
    mortality_label: 0/1 labels (NaN entries are dropped).
    Returns the BCE loss averaged over labeled examples, divided by the
    number of z0 samples per trajectory.
    """
    #print("Computing binary classification loss: compute_CE_loss")

    mortality_label = mortality_label.reshape(-1)

    if len(label_predictions.size()) == 1:
        label_predictions = label_predictions.unsqueeze(0)

    n_traj_samples = label_predictions.size(0)
    label_predictions = label_predictions.reshape(n_traj_samples, -1)

    # Bug fix: `1 - torch.isnan(...)` is subtraction on a bool tensor, which
    # modern PyTorch rejects; `~` is the supported logical negation.
    idx_not_nan = ~torch.isnan(mortality_label)
    if len(idx_not_nan) == 0.:
        print("All are labels are NaNs!")
        ce_loss = torch.Tensor(0.).to(get_device(mortality_label))

    label_predictions = label_predictions[:,idx_not_nan]
    mortality_label = mortality_label[idx_not_nan]

    if torch.sum(mortality_label == 0.) == 0 or torch.sum(mortality_label == 1.) == 0:
        print("Warning: all examples in a batch belong to the same class -- please increase the batch size.")

    assert(not torch.isnan(label_predictions).any())
    assert(not torch.isnan(mortality_label).any())

    # For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
    mortality_label = mortality_label.repeat(n_traj_samples, 1)
    ce_loss = nn.BCEWithLogitsLoss()(label_predictions, mortality_label)

    # divide by number of patients in a batch
    ce_loss = ce_loss / n_traj_samples
    return ce_loss
def compute_multiclass_CE_loss(label_predictions, true_label, mask):
    """Multi-class cross-entropy over per-time-point predictions.

    label_predictions: logits [n_traj_samples, n_traj, n_tp, n_dims]
        (or 3-D, in which case a sample dimension is added).
    true_label: one-hot labels [n_traj, n_tp, n_dims].
    mask: observation mask; time points with no measurements are skipped.
    """
    #print("Computing multi-class classification loss: compute_multiclass_CE_loss")

    if (len(label_predictions.size()) == 3):
        label_predictions = label_predictions.unsqueeze(0)

    n_traj_samples, n_traj, n_tp, n_dims = label_predictions.size()

    # assert(not torch.isnan(label_predictions).any())
    # assert(not torch.isnan(true_label).any())

    # For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
    true_label = true_label.repeat(n_traj_samples, 1, 1)

    label_predictions = label_predictions.reshape(n_traj_samples * n_traj * n_tp, n_dims)
    true_label = true_label.reshape(n_traj_samples * n_traj * n_tp, n_dims)

    # choose time points with at least one measurement
    mask = torch.sum(mask, -1) > 0

    # repeat the mask for each label to mark that the label for this time point is present
    pred_mask = mask.repeat(n_dims, 1,1).permute(1,2,0)

    label_mask = mask
    pred_mask = pred_mask.repeat(n_traj_samples,1,1,1)
    label_mask = label_mask.repeat(n_traj_samples,1,1,1)

    pred_mask = pred_mask.reshape(n_traj_samples * n_traj * n_tp, n_dims)
    label_mask = label_mask.reshape(n_traj_samples * n_traj * n_tp, 1)

    if (label_predictions.size(-1) > 1) and (true_label.size(-1) > 1):
        assert(label_predictions.size(-1) == true_label.size(-1))
        # targets are in one-hot encoding -- convert to indices
        _, true_label = true_label.max(-1)

    res = []
    for i in range(true_label.size(0)):
        # Bug fix: masked_select requires a BoolTensor mask; the original
        # `.byte()` (uint8) masks are deprecated/rejected by modern PyTorch.
        pred_masked = torch.masked_select(label_predictions[i], pred_mask[i].bool())
        labels = torch.masked_select(true_label[i], label_mask[i].bool())

        pred_masked = pred_masked.reshape(-1, n_dims)

        if (len(labels) == 0):
            continue

        ce_loss = nn.CrossEntropyLoss()(pred_masked, labels.long())
        res.append(ce_loss)

    ce_loss = torch.stack(res, 0).to(get_device(label_predictions))
    ce_loss = torch.mean(ce_loss)
    # # divide by number of patients in a batch
    # ce_loss = ce_loss / n_traj_samples
    return ce_loss
def compute_masked_likelihood(mu, data, mask, likelihood_func):
    """Apply likelihood_func per (sample, trajectory, attribute) over only the
    masked-in time points, then average over attributes.

    mu, data, mask: [n_traj_samples, n_traj, n_timepoints, n_dims].
    likelihood_func(mu_masked, data_masked, indices=(i, k, j)) -> scalar tensor.
    Returns a [n_traj, n_traj_samples] tensor.
    """
    # Compute the likelihood per patient and per attribute so that we don't priorize patients with more measurements
    n_traj_samples, n_traj, n_timepoints, n_dims = data.size()

    res = []
    for i in range(n_traj_samples):
        for k in range(n_traj):
            for j in range(n_dims):
                # Bug fix: masked_select requires a BoolTensor mask; the
                # original `.byte()` (uint8) masks are deprecated/rejected
                # by modern PyTorch.
                data_masked = torch.masked_select(data[i,k,:,j], mask[i,k,:,j].bool())

                #assert(torch.sum(data_masked == 0.) < 10)

                mu_masked = torch.masked_select(mu[i,k,:,j], mask[i,k,:,j].bool())
                log_prob = likelihood_func(mu_masked, data_masked, indices = (i,k,j))
                res.append(log_prob)
    # shape: [n_traj*n_traj_samples, 1]

    res = torch.stack(res, 0).to(get_device(data))
    res = res.reshape((n_traj_samples, n_traj, n_dims))
    # Take mean over the number of dimensions
    res = torch.mean(res, -1) # !!!!!!!!!!! changed from sum to mean
    res = res.transpose(0,1)
    return res
def masked_gaussian_log_density(mu, data, obsrv_std, mask = None):
    """Gaussian log-density of data under N(mu, obsrv_std), optionally masked.

    mu/data may arrive with fewer dims (plotting helpers); they are lifted to
    [n_traj_samples, n_traj, n_timepoints, n_dims].  With a mask, likelihood
    is computed per patient/attribute via compute_masked_likelihood.
    Returns a [n_traj, n_traj_samples] tensor.
    """
    # these cases are for plotting through plot_estim_density
    if (len(mu.size()) == 3):
        # add additional dimension for gp samples
        mu = mu.unsqueeze(0)

    if (len(data.size()) == 2):
        # add additional dimension for gp samples and time step
        data = data.unsqueeze(0).unsqueeze(2)
    elif (len(data.size()) == 3):
        # add additional dimension for gp samples
        data = data.unsqueeze(0)

    n_traj_samples, n_traj, n_timepoints, n_dims = mu.size()
    assert(data.size()[-1] == n_dims)

    # Shape after permutation: [n_traj, n_traj_samples, n_timepoints, n_dims]
    if mask is None:
        # Fully observed: flatten each trajectory into one long vector.
        mu_flat = mu.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
        n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
        data_flat = data.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)

        res = gaussian_log_likelihood(mu_flat, data_flat, obsrv_std)
        res = res.reshape(n_traj_samples, n_traj).transpose(0,1)
    else:
        # Compute the likelihood per patient so that we don't priorize patients with more measurements
        func = lambda mu, data, indices: gaussian_log_likelihood(mu, data, obsrv_std = obsrv_std, indices = indices)
        res = compute_masked_likelihood(mu, data, mask, func)
    return res
def mse(mu, data, indices = None):
    """Mean squared error between mu and data; zero scalar for empty input."""
    n_data_points = mu.size()[-1]
    if n_data_points == 0:
        # Nothing observed: contribute a zero scalar on the right device.
        return torch.zeros([1]).to(get_device(data)).squeeze()
    return nn.MSELoss()(mu, data)
def compute_mse(mu, data, mask = None):
    """Mean squared error between predictions mu and data, optionally masked.

    Inputs may arrive with fewer dims (plotting helpers); they are lifted to
    [n_traj_samples, n_traj, n_timepoints, n_dims] before flattening.
    """
    # these cases are for plotting through plot_estim_density
    if (len(mu.size()) == 3):
        mu = mu.unsqueeze(0)

    if (len(data.size()) == 2):
        data = data.unsqueeze(0).unsqueeze(2)
    elif (len(data.size()) == 3):
        data = data.unsqueeze(0)

    n_traj_samples, n_traj, n_timepoints, n_dims = mu.size()
    assert(data.size()[-1] == n_dims)

    if mask is None:
        # Fully observed: one flat MSE over everything.
        flat_mu = mu.reshape(n_traj_samples * n_traj, n_timepoints * n_dims)
        n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
        flat_data = data.reshape(n_traj_samples * n_traj, n_timepoints * n_dims)
        return mse(flat_mu, flat_data)

    # Masked: compute per patient/attribute so that patients with more
    # measurements are not weighted more heavily.
    return compute_masked_likelihood(mu, data, mask, mse)
def compute_poisson_proc_likelihood(truth, pred_y, info, mask = None):
    """Log-likelihood of observation times under the learned Poisson process.

    info must contain "log_lambda_y" (log-intensities over time) and
    "int_lambda" (the intensity integral at the final time).
    Returns a tensor of shape [n_traj_samples, n_traj].
    """
    # Compute Poisson likelihood
    # https://math.stackexchange.com/questions/344487/log-likelihood-of-a-realization-of-a-poisson-process
    # Sum log lambdas across all time points
    if mask is None:
        poisson_log_l = torch.sum(info["log_lambda_y"], 2) - info["int_lambda"]
        # Sum over data dims
        poisson_log_l = torch.mean(poisson_log_l, -1)
    else:
        # Compute likelihood of the data under the predictions
        truth_repeated = truth.repeat(pred_y.size(0), 1, 1, 1)
        mask_repeated = mask.repeat(pred_y.size(0), 1, 1, 1)

        # Compute the likelihood per patient and per attribute so that we don't priorize patients with more measurements
        int_lambda = info["int_lambda"]
        f = lambda log_lam, data, indices: poisson_log_likelihood(log_lam, data, indices, int_lambda)
        poisson_log_l = compute_masked_likelihood(info["log_lambda_y"], truth_repeated, mask_repeated, f)
        poisson_log_l = poisson_log_l.permute(1,0)
        # Take mean over n_traj
        #poisson_log_l = torch.mean(poisson_log_l, 1)

    # poisson_log_l shape: [n_traj_samples, n_traj]
    return poisson_log_l
| 9,166 | 33.592453 | 114 | py |
steer | steer-master/latent_ode/lib/utils.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import logging
import pickle
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import math
import glob
import re
from shutil import copyfile
import sklearn as sk
import subprocess
import datetime
def makedirs(dirname):
    """Create dirname (including parents) if it does not already exist.

    Uses exist_ok=True so concurrent callers cannot race between the
    existence check and the creation: the original check-then-create
    pattern could raise FileExistsError under such a race.
    """
    os.makedirs(dirname, exist_ok=True)
def save_checkpoint(state, save, epoch):
    """Serialize `state` to <save>/checkpt-<epoch>.pth, creating `save` if missing."""
    if not os.path.exists(save):
        os.makedirs(save)
    target = os.path.join(save, 'checkpt-%04d.pth' % epoch)
    torch.save(state, target)
def get_logger(logpath, filepath, package_files=[],
        displaying=True, saving=True, debug=False):
    """Configure and return the root logger.

    logpath: file to write logs to (truncated on open) when saving is True.
    filepath: path echoed as the first log line (typically the running script).
    package_files: extra files whose full contents are logged at startup.
    displaying/saving: attach a console / file handler respectively.
    debug: use DEBUG level instead of INFO.

    Note: the mutable default `package_files=[]` is safe here because the
    list is never mutated.  Handlers accumulate on the root logger if this
    is called more than once per process.
    """
    logger = logging.getLogger()
    if debug:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logger.setLevel(level)
    if saving:
        # mode='w' truncates any previous log at this path.
        info_file_handler = logging.FileHandler(logpath, mode='w')
        info_file_handler.setLevel(level)
        logger.addHandler(info_file_handler)
    if displaying:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        logger.addHandler(console_handler)
    logger.info(filepath)
    for f in package_files:
        logger.info(f)
        with open(f, 'r') as package_f:
            logger.info(package_f.read())
    return logger
def inf_generator(iterable):
    """Yield items from iterable forever, restarting whenever it is exhausted.

    Allows training with DataLoaders in a single infinite loop:
        for i, (x, y) in enumerate(inf_generator(train_loader)):
    """
    it = iter(iterable)
    while True:
        try:
            yield next(it)
        except StopIteration:
            # Exhausted: start a fresh pass over the iterable.
            it = iter(iterable)
def dump_pickle(data, filename):
    """Pickle `data` to `filename` (binary mode)."""
    with open(filename, 'wb') as sink:
        pickle.dump(data, sink)
def load_pickle(filename):
    """Unpickle and return the contents of `filename` (binary mode)."""
    with open(filename, 'rb') as source:
        return pickle.load(source)
def make_dataset(dataset_type = "spiral",**kwargs):
    """Load a pre-generated toy dataset from the local data/ directory.

    dataset_type: "spiral" or "chiralspiral".
    Returns (dataset, chiralities) as stored in the pickle file.
    Raises for any other dataset_type.
    """
    if dataset_type == "spiral":
        data_path = "data/spirals.pickle"
        dataset = load_pickle(data_path)["dataset"]
        chiralities = load_pickle(data_path)["chiralities"]
    elif dataset_type == "chiralspiral":
        data_path = "data/chiral-spirals.pickle"
        dataset = load_pickle(data_path)["dataset"]
        chiralities = load_pickle(data_path)["chiralities"]
    else:
        raise Exception("Unknown dataset type " + dataset_type)
    return dataset, chiralities
def split_last_dim(data):
    """Split `data` into two halves along its last dimension.

    Returns (first_half, second_half); for an odd last dimension the first
    half gets the smaller share (floor division), as before.

    Generalized: the original handled only 2-D and 3-D tensors and raised
    UnboundLocalError for any other rank; ellipsis indexing covers all ranks.
    """
    last_dim = data.size()[-1]
    last_dim = last_dim // 2
    return data[..., :last_dim], data[..., last_dim:]
def init_network_weights(net, std = 0.1):
    """Initialize every nn.Linear inside `net`: N(0, std) weights, zero biases."""
    for layer in net.modules():
        if isinstance(layer, nn.Linear):
            nn.init.normal_(layer.weight, mean=0, std=std)
            nn.init.constant_(layer.bias, val=0)
def flatten(x, dim):
    """Collapse all dimensions of x from `dim` onward into a single dimension."""
    new_shape = x.size()[:dim] + (-1, )
    return x.reshape(new_shape)
def subsample_timepoints(data, time_steps, mask, n_tp_to_sample = None):
    """Randomly zero out time points in-place to simulate sparser sampling.

    n_tp_to_sample: number of time points to subsample. If not None, sample
    exactly n_tp_to_sample points when > 1; a value in (0, 1] is treated as
    the fraction of observed points to keep per series.
    Mutates data (and mask) in place and returns (data, time_steps, mask).
    Uses np.random, so results depend on the global NumPy seed.
    """
    if n_tp_to_sample is None:
        return data, time_steps, mask
    n_tp_in_batch = len(time_steps)

    if n_tp_to_sample > 1:
        # Subsample exact number of points
        assert(n_tp_to_sample <= n_tp_in_batch)
        n_tp_to_sample = int(n_tp_to_sample)

        for i in range(data.size(0)):
            # Choose which time points to drop for this trajectory.
            missing_idx = sorted(np.random.choice(np.arange(n_tp_in_batch), n_tp_in_batch - n_tp_to_sample, replace = False))

            data[i, missing_idx] = 0.
            if mask is not None:
                mask[i, missing_idx] = 0.

    elif (n_tp_to_sample <= 1) and (n_tp_to_sample > 0):
        # Subsample percentage of points from each time series
        percentage_tp_to_sample = n_tp_to_sample
        for i in range(data.size(0)):
            # take mask for current training sample and sum over all features -- figure out which time points don't have any measurements at all in this batch
            current_mask = mask[i].sum(-1).cpu()
            non_missing_tp = np.where(current_mask > 0)[0]
            n_tp_current = len(non_missing_tp)
            n_to_sample = int(n_tp_current * percentage_tp_to_sample)
            subsampled_idx = sorted(np.random.choice(non_missing_tp, n_to_sample, replace = False))
            # Points that were observed but not selected are zeroed out.
            tp_to_set_to_zero = np.setdiff1d(non_missing_tp, subsampled_idx)

            data[i, tp_to_set_to_zero] = 0.
            if mask is not None:
                mask[i, tp_to_set_to_zero] = 0.

    return data, time_steps, mask
def cut_out_timepoints(data, time_steps, mask, n_points_to_cut = None):
    """Zero out a random run of consecutive time points in-place, per trajectory.

    n_points_to_cut: number of consecutive time points to cut out; the cut
    start is chosen uniformly in [5, n_tp - n_points_to_cut - 5), keeping a
    margin at both ends of the timeline.
    Mutates data (and mask) in place and returns (data, time_steps, mask).
    """
    if n_points_to_cut is None:
        return data, time_steps, mask
    n_tp_in_batch = len(time_steps)

    if n_points_to_cut < 1:
        raise Exception("Number of time points to cut out must be > 1")

    assert(n_points_to_cut <= n_tp_in_batch)
    n_points_to_cut = int(n_points_to_cut)

    for i in range(data.size(0)):
        # Random cut start for each trajectory (5-point margin at each end).
        start = np.random.choice(np.arange(5, n_tp_in_batch - n_points_to_cut-5), replace = False)

        data[i, start : (start + n_points_to_cut)] = 0.
        if mask is not None:
            mask[i, start : (start + n_points_to_cut)] = 0.

    return data, time_steps, mask
def get_device(tensor):
    """Return the device of `tensor`: its CUDA device index, else CPU."""
    if tensor.is_cuda:
        return tensor.get_device()
    return torch.device("cpu")
def sample_standard_gaussian(mu, sigma):
    """Draw a reparameterized sample from N(mu, sigma): eps * sigma + mu."""
    device = get_device(mu)

    standard_normal = torch.distributions.normal.Normal(
        torch.Tensor([0.]).to(device), torch.Tensor([1.]).to(device))
    eps = standard_normal.sample(mu.size()).squeeze(-1)
    return eps * sigma.float() + mu.float()
def split_train_test(data, train_fraq = 0.8):
    """Split `data` along dim 0 into (train, test) by fraction train_fraq."""
    n_train = int(data.size(0) * train_fraq)
    return data[:n_train], data[n_train:]
def split_train_test_data_and_time(data, time_steps, train_fraq = 0.8):
    """Split data (dim 0) and per-trajectory time_steps (dim 1) into train/test.

    time_steps must be 2-D with trajectories along dim 1.
    Returns (data_train, data_test, train_time_steps, test_time_steps).
    """
    cutoff = int(data.size(0) * train_fraq)
    data_train = data[:cutoff]
    data_test = data[cutoff:]

    assert(len(time_steps.size()) == 2)
    train_time_steps = time_steps[:, :cutoff]
    test_time_steps = time_steps[:, cutoff:]

    return data_train, data_test, train_time_steps, test_time_steps
def get_next_batch(dataloader):
    """Pull the next batch and drop time points with no observations.

    dataloader: an (infinite) iterator of data dicts containing at least
    "observed_data"/"observed_tp", "data_to_predict"/"tp_to_predict" and
    "mode", with optional "observed_mask"/"mask_predicted_data"/"labels".
    Returns a dict shaped like get_dict_template() plus "mode".
    """
    # Make the union of all time points and perform normalization across the whole dataset
    data_dict = dataloader.__next__()

    batch_dict = get_dict_template()

    # remove the time points where there are no observations in this batch
    non_missing_tp = torch.sum(data_dict["observed_data"],(0,2)) != 0.
    batch_dict["observed_data"] = data_dict["observed_data"][:, non_missing_tp]
    batch_dict["observed_tp"] = data_dict["observed_tp"][non_missing_tp]

    # print("observed data")
    # print(batch_dict["observed_data"].size())

    if ("observed_mask" in data_dict) and (data_dict["observed_mask"] is not None):
        batch_dict["observed_mask"] = data_dict["observed_mask"][:, non_missing_tp]

    batch_dict[ "data_to_predict"] = data_dict["data_to_predict"]
    batch_dict["tp_to_predict"] = data_dict["tp_to_predict"]

    # Same filtering for the prediction targets.
    non_missing_tp = torch.sum(data_dict["data_to_predict"],(0,2)) != 0.
    batch_dict["data_to_predict"] = data_dict["data_to_predict"][:, non_missing_tp]
    batch_dict["tp_to_predict"] = data_dict["tp_to_predict"][non_missing_tp]

    # print("data_to_predict")
    # print(batch_dict["data_to_predict"].size())

    if ("mask_predicted_data" in data_dict) and (data_dict["mask_predicted_data"] is not None):
        batch_dict["mask_predicted_data"] = data_dict["mask_predicted_data"][:, non_missing_tp]

    if ("labels" in data_dict) and (data_dict["labels"] is not None):
        batch_dict["labels"] = data_dict["labels"]

    batch_dict["mode"] = data_dict["mode"]
    return batch_dict
def get_ckpt_model(ckpt_path, model, device):
    """Load weights from the checkpoint at ckpt_path into `model`, move to device.

    Only checkpoint entries whose keys exist in the model are loaded; model
    parameters absent from the checkpoint keep their current values.

    Raises:
        Exception: if ckpt_path does not exist.
    """
    if not os.path.exists(ckpt_path):
        raise Exception("Checkpoint " + ckpt_path + " does not exist.")
    # Load checkpoint onto the target device: a GPU-saved checkpoint would
    # otherwise fail to load on a CPU-only machine.
    checkpt = torch.load(ckpt_path, map_location=device)
    ckpt_args = checkpt['args']
    state_dict = checkpt['state_dict']
    model_dict = model.state_dict()

    # 1. filter out unnecessary keys
    state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
    # 2. overwrite entries in the existing state dict
    model_dict.update(state_dict)
    # 3. load the new state dict.  Bug fix: the original loaded the *filtered*
    # state_dict, which raises on missing keys under strict loading and made
    # step 2's merge pointless; load the merged dict instead.
    model.load_state_dict(model_dict)
    model.to(device)
def update_learning_rate(optimizer, decay_rate = 0.999, lowest = 1e-3):
    """Multiply each param group's lr by decay_rate, clamped from below at `lowest`."""
    for group in optimizer.param_groups:
        group['lr'] = max(group['lr'] * decay_rate, lowest)
def linspace_vector(start, end, n_points):
    """Linearly interpolate from `start` to `end` in n_points steps.

    start/end may be scalar tensors (returns shape [n_points]) or 1-D vectors
    of equal length d (returns shape [n_points, d], one ramp per component).
    """
    assert(start.size() == end.size())
    n_elements = np.prod(start.size())
    if n_elements == 1:
        # Scalar endpoints: a single 1-D ramp.
        return torch.linspace(start, end, n_points)
    # Vector endpoints: one ramp per component, stacked then transposed to
    # [n_points, d].
    ramps = [torch.linspace(start[i], end[i], n_points) for i in range(start.size(0))]
    return torch.t(torch.cat(ramps, 0).reshape(start.size(0), n_points))
def reverse(tensor):
    """Return a copy of `tensor` with its first dimension reversed."""
    back_to_front = list(range(tensor.size(0) - 1, -1, -1))
    return tensor[back_to_front]
def create_net(n_inputs, n_outputs, n_layers = 1,
    n_units = 100, nonlinear = nn.Tanh):
    """Build an MLP: input layer, n_layers hidden (n_units wide), output layer.

    Each Linear after the first is preceded by an instance of `nonlinear`.
    Returns an nn.Sequential of 2 * n_layers + 3 modules.
    """
    layers = [nn.Linear(n_inputs, n_units)]
    for _ in range(n_layers):
        layers += [nonlinear(), nn.Linear(n_units, n_units)]
    layers += [nonlinear(), nn.Linear(n_units, n_outputs)]
    return nn.Sequential(*layers)
def get_item_from_pickle(pickle_file, item_name):
    """Return pickled_contents[item_name] if the key exists, else None."""
    contents = load_pickle(pickle_file)
    if item_name not in contents:
        return None
    return contents[item_name]
def get_dict_template():
    """Return a fresh batch-dict skeleton with every field set to None."""
    field_names = ("observed_data", "observed_tp",
                   "data_to_predict", "tp_to_predict",
                   "observed_mask", "mask_predicted_data",
                   "labels")
    return {name: None for name in field_names}
def normalize_data(data):
    """Normalize `data` per attribute (last dimension).

    Returns (data_norm, att_min, att_max) where
    data_norm = (data - att_min) / att_max.  Note the divisor is att_max,
    not (att_max - att_min) — kept as in the original behavior.
    Attributes whose maximum is exactly 0 get divisor 1 to avoid division
    by zero.  Raises if the result contains NaNs.
    """
    reshaped = data.reshape(-1, data.size(-1))

    att_min = torch.min(reshaped, 0)[0]
    att_max = torch.max(reshaped, 0)[0]

    # we don't want to divide by zero
    att_max[ att_max == 0.] = 1.

    # (The original's guarded "Zero!" branch was unreachable: every zero in
    # att_max was just replaced by 1 above.)
    data_norm = (data - att_min) / att_max

    if torch.isnan(data_norm).any():
        raise Exception("nans!")

    return data_norm, att_min, att_max
def normalize_masked_data(data, mask, att_min, att_max):
    """Normalize `data` with given per-attribute min/max, re-zeroing masked-out entries.

    data_norm = (data - att_min) / att_max; entries where mask == 0 are set
    back to zero afterwards.  Mutates att_max in place: zeros are replaced
    by 1 to avoid division by zero.  Raises if the result contains NaNs.
    Returns (data_norm, att_min, att_max).
    """
    # we don't want to divide by zero
    att_max[ att_max == 0.] = 1.

    # (The original's guarded "Zero!" branch was unreachable: every zero in
    # att_max was just replaced by 1 above.)
    data_norm = (data - att_min) / att_max

    if torch.isnan(data_norm).any():
        raise Exception("nans!")

    # set masked out elements back to zero
    data_norm[mask == 0] = 0

    return data_norm, att_min, att_max
def shift_outputs(outputs, first_datapoint = None):
    """Shift `outputs` one step right along the time axis (dim 2).

    Drops the last time step; when first_datapoint [n_traj, n_dims] is given,
    it is prepended so the time dimension keeps its length.
    """
    shifted = outputs[:,:,:-1,:]
    if first_datapoint is None:
        return shifted
    n_traj, n_dims = first_datapoint.size()
    head = first_datapoint.reshape(1, n_traj, 1, n_dims)
    return torch.cat((head, shifted), 2)
def split_data_extrap(data_dict, dataset = ""):
    """Split a batch along time for extrapolation: condition on the first part
    of the timeline (half; a third for "hopper") and predict the remainder.

    data_dict: requires "data" [n_traj, n_tp, n_dims] and "time_steps";
    optional "mask" and "labels" are cloned and split alongside.
    Returns a dict shaped like get_dict_template() plus "mode" == "extrap".
    (The original also computed an unused `device` local; removed.)
    """
    n_observed_tp = data_dict["data"].size(1) // 2
    if dataset == "hopper":
        n_observed_tp = data_dict["data"].size(1) // 3

    split_dict = {"observed_data": data_dict["data"][:,:n_observed_tp,:].clone(),
                  "observed_tp": data_dict["time_steps"][:n_observed_tp].clone(),
                  "data_to_predict": data_dict["data"][:,n_observed_tp:,:].clone(),
                  "tp_to_predict": data_dict["time_steps"][n_observed_tp:].clone()}

    split_dict["observed_mask"] = None
    split_dict["mask_predicted_data"] = None
    split_dict["labels"] = None

    if ("mask" in data_dict) and (data_dict["mask"] is not None):
        split_dict["observed_mask"] = data_dict["mask"][:, :n_observed_tp].clone()
        split_dict["mask_predicted_data"] = data_dict["mask"][:, n_observed_tp:].clone()

    if ("labels" in data_dict) and (data_dict["labels"] is not None):
        split_dict["labels"] = data_dict["labels"].clone()

    split_dict["mode"] = "extrap"
    return split_dict
def split_data_interp(data_dict):
    """Build an interpolation batch: condition on and predict the full timeline.

    data_dict: requires "data" and "time_steps"; optional "mask" and "labels"
    are cloned into the result.  All tensors are cloned, so the batch can be
    mutated (e.g. subsampled) without touching the source.
    Returns a dict shaped like get_dict_template() plus "mode" == "interp".
    (The original also computed an unused `device` local; removed.)
    """
    split_dict = {"observed_data": data_dict["data"].clone(),
                  "observed_tp": data_dict["time_steps"].clone(),
                  "data_to_predict": data_dict["data"].clone(),
                  "tp_to_predict": data_dict["time_steps"].clone()}

    split_dict["observed_mask"] = None
    split_dict["mask_predicted_data"] = None
    split_dict["labels"] = None

    if "mask" in data_dict and data_dict["mask"] is not None:
        split_dict["observed_mask"] = data_dict["mask"].clone()
        split_dict["mask_predicted_data"] = data_dict["mask"].clone()
    if ("labels" in data_dict) and (data_dict["labels"] is not None):
        split_dict["labels"] = data_dict["labels"].clone()

    split_dict["mode"] = "interp"
    return split_dict
def add_mask(data_dict):
    """Ensure data_dict["observed_mask"] is set, defaulting to an all-ones mask."""
    observations = data_dict["observed_data"]
    if data_dict["observed_mask"] is None:
        # Everything observed: mask of ones matching the data, on its device.
        data_dict["observed_mask"] = torch.ones_like(observations).to(get_device(observations))
    return data_dict
def subsample_observed_data(data_dict, n_tp_to_sample = None, n_points_to_cut = None):
    """Return a copy of data_dict with observed time points thinned or cut.

    n_tp_to_sample -- if not None, randomly subsample the time points. The resulting timeline has n_tp_to_sample points
    n_points_to_cut -- if not None, cut out consecutive points on the timeline. The resulting timeline has (N - n_points_to_cut) points

    NOTE(review): if both arguments are None, `data`/`time_steps`/`mask` below
    are unbound (UnboundLocalError) — callers appear to guard against this;
    confirm before relying on it.
    """
    if n_tp_to_sample is not None:
        # Randomly subsample time points
        data, time_steps, mask = subsample_timepoints(
            data_dict["observed_data"].clone(),
            time_steps = data_dict["observed_tp"].clone(),
            mask = (data_dict["observed_mask"].clone() if data_dict["observed_mask"] is not None else None),
            n_tp_to_sample = n_tp_to_sample)

    if n_points_to_cut is not None:
        # Remove consecutive time points
        data, time_steps, mask = cut_out_timepoints(
            data_dict["observed_data"].clone(),
            time_steps = data_dict["observed_tp"].clone(),
            mask = (data_dict["observed_mask"].clone() if data_dict["observed_mask"] is not None else None),
            n_points_to_cut = n_points_to_cut)

    new_data_dict = {}
    for key in data_dict.keys():
        new_data_dict[key] = data_dict[key]

    new_data_dict["observed_data"] = data.clone()
    new_data_dict["observed_tp"] = time_steps.clone()
    new_data_dict["observed_mask"] = mask.clone()

    if n_points_to_cut is not None:
        # Cut the section in the data to predict as well
        # Used only for the demo on the periodic function
        new_data_dict["data_to_predict"] = data.clone()
        new_data_dict["tp_to_predict"] = time_steps.clone()
        new_data_dict["mask_predicted_data"] = mask.clone()

    return new_data_dict
def split_and_subsample_batch(data_dict, args, data_type = "train"):
    """Split a raw batch for the task (extrapolation vs interpolation), attach
    a mask, and optionally subsample/cut the observed timeline.

    args must provide .extrap, .dataset, .sample_tp and .cut_tp.
    data_type is accepted for API symmetry; train and test currently follow
    the same code path.
    """
    if data_type == "train":
        # Training set
        if args.extrap:
            processed_dict = split_data_extrap(data_dict, dataset = args.dataset)
        else:
            processed_dict = split_data_interp(data_dict)

    else:
        # Test set
        if args.extrap:
            processed_dict = split_data_extrap(data_dict, dataset = args.dataset)
        else:
            processed_dict = split_data_interp(data_dict)

    # add mask
    processed_dict = add_mask(processed_dict)

    # Subsample points or cut out the whole section of the timeline
    if (args.sample_tp is not None) or (args.cut_tp is not None):
        processed_dict = subsample_observed_data(processed_dict,
            n_tp_to_sample = args.sample_tp,
            n_points_to_cut = args.cut_tp)

    # if (args.sample_tp is not None):
    # 	processed_dict = subsample_observed_data(processed_dict,
    # 		n_tp_to_sample = args.sample_tp)
    return processed_dict
def compute_loss_all_batches(model,
    test_dataloader, args,
    n_batches, experimentID, device,
    n_traj_samples = 1, kl_coef = 1.,
    max_samples_for_eval = None):
    """Average every loss component of `model` over n_batches test batches.

    Accumulates loss/likelihood/mse/KL/Poisson/CE terms from
    model.compute_all_losses, averages them, and (when args.classif) adds
    AUC (physionet) or accuracy (activity) computed over all collected
    predictions.  Returns the dict of averaged metrics.
    """
    total = {}
    total["loss"] = 0
    total["likelihood"] = 0
    total["mse"] = 0
    total["kl_first_p"] = 0
    total["std_first_p"] = 0
    total["pois_likelihood"] = 0
    total["ce_loss"] = 0

    n_test_batches = 0

    classif_predictions = torch.Tensor([]).to(device)
    all_test_labels = torch.Tensor([]).to(device)

    for i in range(n_batches):
        print("Computing loss... " + str(i))

        batch_dict = get_next_batch(test_dataloader)

        results = model.compute_all_losses(batch_dict,
            n_traj_samples = n_traj_samples, kl_coef = kl_coef)

        if args.classif:
            n_labels = model.n_labels #batch_dict["labels"].size(-1)
            n_traj_samples = results["label_predictions"].size(0)

            classif_predictions = torch.cat((classif_predictions,
                results["label_predictions"].reshape(n_traj_samples, -1, n_labels)),1)
            all_test_labels = torch.cat((all_test_labels,
                batch_dict["labels"].reshape(-1, n_labels)),0)

        for key in total.keys():
            if key in results:
                var = results[key]
                if isinstance(var, torch.Tensor):
                    var = var.detach()
                total[key] += var

        n_test_batches += 1

        # for speed
        if max_samples_for_eval is not None:
            # NOTE(review): `batch_size` is not defined in this scope; this
            # branch raises NameError whenever max_samples_for_eval is set.
            if n_batches * batch_size >= max_samples_for_eval:
                break

    if n_test_batches > 0:
        for key, value in total.items():
            total[key] = total[key] / n_test_batches

    if args.classif:
        if args.dataset == "physionet":
            #all_test_labels = all_test_labels.reshape(-1)
            # For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
            all_test_labels = all_test_labels.repeat(n_traj_samples,1,1)

            # Bug fix: `1 - torch.isnan(...)` is subtraction on a bool tensor,
            # which modern PyTorch rejects; `~` is the supported negation.
            idx_not_nan = ~torch.isnan(all_test_labels)
            classif_predictions = classif_predictions[idx_not_nan]
            all_test_labels = all_test_labels[idx_not_nan]

            dirname = "plots/" + str(experimentID) + "/"
            os.makedirs(dirname, exist_ok=True)

            total["auc"] = 0.
            if torch.sum(all_test_labels) != 0.:
                print("Number of labeled examples: {}".format(len(all_test_labels.reshape(-1))))
                print("Number of examples with mortality 1: {}".format(torch.sum(all_test_labels == 1.)))

                # Cannot compute AUC with only 1 class
                total["auc"] = sk.metrics.roc_auc_score(all_test_labels.cpu().numpy().reshape(-1),
                    classif_predictions.cpu().numpy().reshape(-1))
            else:
                print("Warning: Couldn't compute AUC -- all examples are from the same class")

        if args.dataset == "activity":
            all_test_labels = all_test_labels.repeat(n_traj_samples,1,1)

            # Score only the time points that actually carry a label.
            labeled_tp = torch.sum(all_test_labels, -1) > 0.

            all_test_labels = all_test_labels[labeled_tp]
            classif_predictions = classif_predictions[labeled_tp]

            # classif_predictions and all_test_labels are in on-hot-encoding -- convert to class ids
            _, pred_class_id = torch.max(classif_predictions, -1)
            _, class_labels = torch.max(all_test_labels, -1)

            pred_class_id = pred_class_id.reshape(-1)

            total["accuracy"] = sk.metrics.accuracy_score(
                class_labels.cpu().numpy(),
                pred_class_id.cpu().numpy())

    return total
def check_mask(data, mask):
    """Sanity-check that `mask` is a valid 0/1 observation mask for `data`."""
    zero_positions = (mask == 0.)
    one_positions = (mask == 1.)

    # Every entry of the mask must be exactly zero or one.
    n_zeros = torch.sum(zero_positions).cpu().numpy()
    n_ones = torch.sum(one_positions).cpu().numpy()
    total_entries = np.prod(list(mask.size()))
    assert (n_zeros + n_ones) == total_entries

    # Every masked-out position must hold a zero in the data tensor.
    assert torch.sum(data[zero_positions] != 0.) == 0
| 18,626 | 28.660828 | 149 | py |
steer | steer-master/latent_ode/lib/encoder_decoder.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from torch.distributions import Categorical, Normal
import lib.utils as utils
from torch.nn.modules.rnn import LSTM, GRU
from lib.utils import get_device
# GRU description:
# http://www.wildml.com/2015/10/recurrent-neural-network-tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/
class GRU_unit(nn.Module):
    """GRU cell operating on a Gaussian hidden state (mean and std).

    The input x is expected to contain the data concatenated with its
    observation mask along the last dimension (see `forward`).
    """
    def __init__(self, latent_dim, input_dim,
        update_gate = None,
        reset_gate = None,
        new_state_net = None,
        n_units = 100,
        device = torch.device("cpu")):
        super(GRU_unit, self).__init__()

        if update_gate is None:
            self.update_gate = nn.Sequential(
               nn.Linear(latent_dim * 2 + input_dim, n_units),
               nn.Tanh(),
               nn.Linear(n_units, latent_dim),
               nn.Sigmoid())
            utils.init_network_weights(self.update_gate)
        else:
            self.update_gate = update_gate

        if reset_gate is None:
            self.reset_gate = nn.Sequential(
               nn.Linear(latent_dim * 2 + input_dim, n_units),
               nn.Tanh(),
               nn.Linear(n_units, latent_dim),
               nn.Sigmoid())
            utils.init_network_weights(self.reset_gate)
        else:
            self.reset_gate = reset_gate

        if new_state_net is None:
            # Outputs 2 * latent_dim: candidate mean and candidate std.
            self.new_state_net = nn.Sequential(
               nn.Linear(latent_dim * 2 + input_dim, n_units),
               nn.Tanh(),
               nn.Linear(n_units, latent_dim * 2))
            utils.init_network_weights(self.new_state_net)
        else:
            self.new_state_net = new_state_net

    def forward(self, y_mean, y_std, x, masked_update = True):
        """One GRU step: returns (new_mean, new_std) of the hidden state."""
        y_concat = torch.cat([y_mean, y_std, x], -1)

        update_gate = self.update_gate(y_concat)
        reset_gate = self.reset_gate(y_concat)
        concat = torch.cat([y_mean * reset_gate, y_std * reset_gate, x], -1)

        new_state, new_state_std = utils.split_last_dim(self.new_state_net(concat))
        new_state_std = new_state_std.abs()

        new_y = (1-update_gate) * new_state + update_gate * y_mean
        new_y_std = (1-update_gate) * new_state_std + update_gate * y_std

        assert(not torch.isnan(new_y).any())

        if masked_update:
            # IMPORTANT: assumes that x contains both data and mask.
            # Update the hidden state only if at least one feature is
            # observed at the current time point.
            n_data_dims = x.size(-1)//2
            mask = x[:, :, n_data_dims:]
            utils.check_mask(x[:, :, :n_data_dims], mask)

            mask = (torch.sum(mask, -1, keepdim = True) > 0).float()

            assert(not torch.isnan(mask).any())

            new_y = mask * new_y + (1-mask) * y_mean
            new_y_std = mask * new_y_std + (1-mask) * y_std

            if torch.isnan(new_y).any():
                print("new_y is nan!")
                print(mask)
                print(y_mean)
                # Fix: the original printed `prev_new_y`, an undefined name,
                # which raised NameError before reaching exit().
                print(new_state)
                exit()

        new_y_std = new_y_std.abs()
        return new_y, new_y_std
class Encoder_z0_RNN(nn.Module):
    """Encodes a time series into a Gaussian over z0 with a plain GRU RNN.

    Optionally appends the time delta between consecutive observations
    as an extra input feature (use_delta_t).
    """
    def __init__(self, latent_dim, input_dim, lstm_output_size = 20,
        use_delta_t = True, device = torch.device("cpu")):

        super(Encoder_z0_RNN, self).__init__()

        self.gru_rnn_output_size = lstm_output_size
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.device = device
        self.use_delta_t = use_delta_t

        # Maps the last RNN output to (mean, std) of z0.
        self.hiddens_to_z0 = nn.Sequential(
           nn.Linear(self.gru_rnn_output_size, 50),
           nn.Tanh(),
           nn.Linear(50, latent_dim * 2),)

        utils.init_network_weights(self.hiddens_to_z0)

        # (A dead local `input_dim = self.input_dim` was removed here --
        # it was never read.)
        if use_delta_t:
            self.input_dim += 1  # one extra channel for the time delta
        self.gru_rnn = GRU(self.input_dim, self.gru_rnn_output_size).to(device)

    def forward(self, data, time_steps, run_backwards = True):
        """Return (mean, std) of z0, each shaped [1, n_traj, latent_dim].

        IMPORTANT: assumes that 'data' already has the mask concatenated
        to it. data shape: [n_traj, n_tp, n_dims].
        """
        # shape required for rnn: (seq_len, batch, input_size)
        # t0: not used here
        n_traj = data.size(0)

        assert(not torch.isnan(data).any())
        assert(not torch.isnan(time_steps).any())

        data = data.permute(1,0,2)

        if run_backwards:
            # Look at data in the reverse order: from later points to the first
            data = utils.reverse(data)

        if self.use_delta_t:
            delta_t = time_steps[1:] - time_steps[:-1]
            if run_backwards:
                # we are going backwards in time
                delta_t = utils.reverse(delta_t)
            # append zero delta t in the end
            delta_t = torch.cat((delta_t, torch.zeros(1).to(self.device)))
            delta_t = delta_t.unsqueeze(1).repeat((1,n_traj)).unsqueeze(-1)
            data = torch.cat((delta_t, data),-1)

        outputs, _ = self.gru_rnn(data)

        # GRU output shape: (seq_len, batch, num_directions * hidden_size)
        last_output = outputs[-1]

        self.extra_info ={"rnn_outputs": outputs, "time_points": time_steps}

        mean, std = utils.split_last_dim(self.hiddens_to_z0(last_output))
        std = std.abs()

        assert(not torch.isnan(mean).any())
        assert(not torch.isnan(std).any())

        return mean.unsqueeze(0), std.unsqueeze(0)
class Encoder_z0_ODE_RNN(nn.Module):
    # Derive z0 by running an ODE backwards in time.
    # For every y_i we have two versions: encoded from data and derived from the
    # ODE by running it backwards from t_i+1 to t_i.
    # Compute a weighted sum of y_i from data and y_i from the ODE. Use the
    # weighted y_i as an initial value for the ODE running from t_i to t_i-1.
    # Continue until we get to z0.

    def __init__(self, latent_dim, input_dim, z0_diffeq_solver = None,
        z0_dim = None, GRU_update = None,
        n_gru_units = 100,
        device = torch.device("cpu")):
        """ODE-RNN encoder producing a Gaussian over z0.

        z0_diffeq_solver: solver used to evolve the hidden state between
        observations; GRU_update: cell applied at each observation (a
        default GRU_unit is built when None).
        """
        super(Encoder_z0_ODE_RNN, self).__init__()

        # Output dimensionality of z0 may differ from the hidden latent_dim.
        if z0_dim is None:
            self.z0_dim = latent_dim
        else:
            self.z0_dim = z0_dim

        if GRU_update is None:
            self.GRU_update = GRU_unit(latent_dim, input_dim,
                n_units = n_gru_units,
                device=device).to(device)
        else:
            self.GRU_update = GRU_update

        self.z0_diffeq_solver = z0_diffeq_solver
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.device = device
        self.extra_info = None

        # Maps the final (mean, std) pair to the (mean, std) of z0.
        self.transform_z0 = nn.Sequential(
           nn.Linear(latent_dim * 2, 100),
           nn.Tanh(),
           nn.Linear(100, self.z0_dim * 2),)
        utils.init_network_weights(self.transform_z0)

    def forward(self, data, time_steps, run_backwards = True, save_info = False):
        """Return (mean_z0, std_z0), each shaped [1, n_traj, z0_dim].

        data, time_steps -- observations and their time stamps.
        IMPORTANT: assumes that 'data' already has mask concatenated to it.
        """
        assert(not torch.isnan(data).any())
        assert(not torch.isnan(time_steps).any())

        n_traj, n_tp, n_dims = data.size()
        if len(time_steps) == 1:
            # Single observation: no ODE needed, one GRU step from zeros.
            prev_y = torch.zeros((1, n_traj, self.latent_dim)).to(self.device)
            prev_std = torch.zeros((1, n_traj, self.latent_dim)).to(self.device)

            xi = data[:,0,:].unsqueeze(0)

            last_yi, last_yi_std = self.GRU_update(prev_y, prev_std, xi)
            extra_info = None
        else:
            last_yi, last_yi_std, _, extra_info = self.run_odernn(
                data, time_steps, run_backwards = run_backwards,
                save_info = save_info)

        means_z0 = last_yi.reshape(1, n_traj, self.latent_dim)
        std_z0 = last_yi_std.reshape(1, n_traj, self.latent_dim)

        mean_z0, std_z0 = utils.split_last_dim( self.transform_z0( torch.cat((means_z0, std_z0), -1)))
        std_z0 = std_z0.abs()
        if save_info:
            self.extra_info = extra_info

        return mean_z0, std_z0

    def run_odernn(self, data, time_steps,
        run_backwards = True, save_info = False):
        """Alternate ODE evolution and GRU updates over all time points.

        Returns (yi, yi_std, latent_ys, extra_info); latent_ys stacks the
        hidden mean after every observation.
        IMPORTANT: assumes that 'data' already has mask concatenated to it.
        """
        n_traj, n_tp, n_dims = data.size()
        extra_info = []

        # t0 is the endpoint we integrate towards (first time point when
        # running backwards).
        t0 = time_steps[-1]
        if run_backwards:
            t0 = time_steps[0]

        device = get_device(data)

        prev_y = torch.zeros((1, n_traj, self.latent_dim)).to(device)
        prev_std = torch.zeros((1, n_traj, self.latent_dim)).to(device)

        prev_t, t_i = time_steps[-1] + 0.01,  time_steps[-1]

        interval_length = time_steps[-1] - time_steps[0]
        minimum_step = interval_length / 50

        #print("minimum step: {}".format(minimum_step))

        assert(not torch.isnan(data).any())
        assert(not torch.isnan(time_steps).any())

        latent_ys = []
        # Run ODE backwards and combine the y(t) estimates using gating
        time_points_iter = range(0, len(time_steps))
        if run_backwards:
            time_points_iter = reversed(time_points_iter)

        for i in time_points_iter:
            if (prev_t - t_i) < minimum_step:
                # Interval too small for the solver: take a single Euler step.
                time_points = torch.stack((prev_t, t_i))
                inc = self.z0_diffeq_solver.ode_func(prev_t, prev_y) * (t_i - prev_t)

                assert(not torch.isnan(inc).any())

                ode_sol = prev_y + inc
                ode_sol = torch.stack((prev_y, ode_sol), 2).to(device)

                assert(not torch.isnan(ode_sol).any())
            else:
                # Use the full ODE solver over intermediate time points.
                n_intermediate_tp = max(2, ((prev_t - t_i) / minimum_step).int())

                time_points = utils.linspace_vector(prev_t, t_i, n_intermediate_tp)
                ode_sol = self.z0_diffeq_solver(prev_y, time_points)

                assert(not torch.isnan(ode_sol).any())

            # NOTE(review): this checks the signed mean of the residual (not
            # its absolute value), so it can pass with large mixed-sign errors.
            if torch.mean(ode_sol[:, :, 0, :] - prev_y) >= 0.001:
                print("Error: first point of the ODE is not equal to initial value")
                print(torch.mean(ode_sol[:, :, 0, :]  - prev_y))
                exit()
            #assert(torch.mean(ode_sol[:, :, 0, :]  - prev_y) < 0.001)

            # GRU update at the observation, using the ODE-evolved state.
            yi_ode = ode_sol[:, :, -1, :]
            xi = data[:,i,:].unsqueeze(0)

            yi, yi_std = self.GRU_update(yi_ode, prev_std, xi)

            prev_y, prev_std = yi, yi_std
            prev_t, t_i = time_steps[i],  time_steps[i-1]

            latent_ys.append(yi)

            if save_info:
                d = {"yi_ode": yi_ode.detach(), #"yi_from_data": yi_from_data,
                     "yi": yi.detach(), "yi_std": yi_std.detach(),
                     "time_points": time_points.detach(), "ode_sol": ode_sol.detach()}
                extra_info.append(d)

        latent_ys = torch.stack(latent_ys, 1)

        assert(not torch.isnan(yi).any())
        assert(not torch.isnan(yi_std).any())

        return yi, yi_std, latent_ys, extra_info
class Decoder(nn.Module):
    """Maps latent-space trajectories back to observation space."""

    def __init__(self, latent_dim, input_dim):
        super(Decoder, self).__init__()
        # A single affine layer decodes the latent ODE state into data space.
        net = nn.Sequential(
           nn.Linear(latent_dim, input_dim),)
        utils.init_network_weights(net)
        self.decoder = net

    def forward(self, data):
        return self.decoder(data)
| 9,918 | 28.520833 | 130 | py |
steer | steer-master/latent_ode/lib/base_models.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.nn.modules.rnn import GRUCell, LSTMCell, RNNCellBase
from torch.distributions.normal import Normal
from torch.distributions import Independent
from torch.nn.parameter import Parameter
def create_classifier(z0_dim, n_labels):
    """Build a two-hidden-layer MLP mapping a z0 vector to label logits."""
    layers = [
        nn.Linear(z0_dim, 300),
        nn.ReLU(),
        nn.Linear(300, 300),
        nn.ReLU(),
        nn.Linear(300, n_labels),
    ]
    return nn.Sequential(*layers)
class Baseline(nn.Module):
    """Deterministic (non-VAE) baseline model.

    Computes a Gaussian reconstruction likelihood, MSE, an optional
    Poisson-process likelihood, and optional binary/multiclass
    classification loss. Subclasses are expected to provide
    `get_reconstruction`.
    """
    def __init__(self, input_dim, latent_dim, device,
        obsrv_std = 0.01, use_binary_classif = False,
        classif_per_tp = False,
        use_poisson_proc = False,
        linear_classifier = False,
        n_labels = 1,
        train_classif_w_reconstr = False):
        super(Baseline, self).__init__()

        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.n_labels = n_labels

        # Fixed observation noise std used in the Gaussian likelihood.
        self.obsrv_std = torch.Tensor([obsrv_std]).to(device)
        self.device = device

        self.use_binary_classif = use_binary_classif
        self.classif_per_tp = classif_per_tp
        self.use_poisson_proc = use_poisson_proc
        self.linear_classifier = linear_classifier
        self.train_classif_w_reconstr = train_classif_w_reconstr

        z0_dim = latent_dim
        if use_poisson_proc:
            # Poisson process doubles the latent state (extra rate channels).
            z0_dim += latent_dim

        if use_binary_classif:
            if linear_classifier:
                self.classifier = nn.Sequential(
                    nn.Linear(z0_dim, n_labels))
            else:
                self.classifier = create_classifier(z0_dim, n_labels)
            utils.init_network_weights(self.classifier)

    def get_gaussian_likelihood(self, truth, pred_y, mask = None):
        """Per-trajectory Gaussian log-density, averaged over sample dim.

        pred_y shape [n_traj_samples, n_traj, n_tp, n_dim];
        truth shape [n_traj, n_tp, n_dim]. Returns shape [n_traj].
        """
        if mask is not None:
            mask = mask.repeat(pred_y.size(0), 1, 1, 1)

        # Compute likelihood of the data under the predictions
        log_density_data = masked_gaussian_log_density(pred_y, truth,
            obsrv_std = self.obsrv_std, mask = mask)
        log_density_data = log_density_data.permute(1,0)

        # Compute the total density
        # Take mean over n_traj_samples
        log_density = torch.mean(log_density_data, 0)

        # shape: [n_traj]
        return log_density

    def get_mse(self, truth, pred_y, mask = None):
        """Scalar mean squared error between predictions and truth.

        pred_y shape [n_traj_samples, n_traj, n_tp, n_dim];
        truth shape [n_traj, n_tp, n_dim].
        """
        if mask is not None:
            mask = mask.repeat(pred_y.size(0), 1, 1, 1)

        # Compute likelihood of the data under the predictions
        log_density_data = compute_mse(pred_y, truth, mask = mask)
        # shape: [1]
        return torch.mean(log_density_data)

    def compute_all_losses(self, batch_dict,
        n_tp_to_sample = None, n_traj_samples = 1, kl_coef = 1.):
        """Condition on observed points, predict all points, and return a
        dict of detached scalar metrics plus the differentiable "loss".
        """
        # Condition on subsampled points
        # Make predictions for all the points
        pred_x, info = self.get_reconstruction(batch_dict["tp_to_predict"],
            batch_dict["observed_data"], batch_dict["observed_tp"],
            mask = batch_dict["observed_mask"], n_traj_samples = n_traj_samples,
            mode = batch_dict["mode"])

        # Compute likelihood of all the points
        likelihood = self.get_gaussian_likelihood(batch_dict["data_to_predict"], pred_x,
            mask = batch_dict["mask_predicted_data"])

        mse = self.get_mse(batch_dict["data_to_predict"], pred_x,
            mask = batch_dict["mask_predicted_data"])

        ################################
        # Compute CE loss for binary classification on Physionet
        # Use only last attribute -- mortality in the hospital
        device = get_device(batch_dict["data_to_predict"])
        ce_loss = torch.Tensor([0.]).to(device)

        if (batch_dict["labels"] is not None) and self.use_binary_classif:
            # Single label per trajectory -> binary CE; otherwise multiclass.
            if (batch_dict["labels"].size(-1) == 1) or (len(batch_dict["labels"].size()) == 1):
                ce_loss = compute_binary_CE_loss(
                    info["label_predictions"],
                    batch_dict["labels"])
            else:
                ce_loss = compute_multiclass_CE_loss(
                    info["label_predictions"],
                    batch_dict["labels"],
                    mask = batch_dict["mask_predicted_data"])

            if torch.isnan(ce_loss):
                print("label pred")
                print(info["label_predictions"])
                print("labels")
                print( batch_dict["labels"])
                raise Exception("CE loss is Nan!")

        pois_log_likelihood = torch.Tensor([0.]).to(get_device(batch_dict["data_to_predict"]))
        if self.use_poisson_proc:
            pois_log_likelihood = compute_poisson_proc_likelihood(
                batch_dict["data_to_predict"], pred_x,
                info, mask = batch_dict["mask_predicted_data"])
            # Take mean over n_traj
            pois_log_likelihood = torch.mean(pois_log_likelihood, 1)

        loss = - torch.mean(likelihood)

        if self.use_poisson_proc:
            loss = loss - 0.1 * pois_log_likelihood

        if self.use_binary_classif:
            # NOTE: without train_classif_w_reconstr the reconstruction loss
            # is discarded and training optimizes CE only.
            if self.train_classif_w_reconstr:
                loss = loss +  ce_loss * 100
            else:
                loss =  ce_loss

        # Take mean over the number of samples in a batch
        results = {}
        results["loss"] = torch.mean(loss)
        results["likelihood"] = torch.mean(likelihood).detach()
        results["mse"] = torch.mean(mse).detach()
        results["pois_likelihood"] = torch.mean(pois_log_likelihood).detach()
        results["ce_loss"] = torch.mean(ce_loss).detach()
        results["kl"] = 0.
        results["kl_first_p"] = 0.
        results["std_first_p"] = 0.

        if batch_dict["labels"] is not None and self.use_binary_classif:
            results["label_predictions"] = info["label_predictions"].detach()
        return results
class VAE_Baseline(nn.Module):
    """VAE-style baseline: latent z0 with a prior, IWAE-style bound.

    Computes reconstruction likelihood, KL(q(z0)||prior), MSE, optional
    Poisson-process likelihood and classification loss. Subclasses are
    expected to provide `get_reconstruction`.
    """
    def __init__(self, input_dim, latent_dim,
        z0_prior, device,
        obsrv_std = 0.01,
        use_binary_classif = False,
        classif_per_tp = False,
        use_poisson_proc = False,
        linear_classifier = False,
        n_labels = 1,
        train_classif_w_reconstr = False):

        super(VAE_Baseline, self).__init__()

        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.device = device
        self.n_labels = n_labels

        # Fixed observation noise std used in the Gaussian likelihood.
        self.obsrv_std = torch.Tensor([obsrv_std]).to(device)

        self.z0_prior = z0_prior
        self.use_binary_classif = use_binary_classif
        self.classif_per_tp = classif_per_tp
        self.use_poisson_proc = use_poisson_proc
        self.linear_classifier = linear_classifier
        self.train_classif_w_reconstr = train_classif_w_reconstr

        z0_dim = latent_dim
        if use_poisson_proc:
            # Poisson process doubles the latent state (extra rate channels).
            z0_dim += latent_dim

        if use_binary_classif:
            if linear_classifier:
                self.classifier = nn.Sequential(
                    nn.Linear(z0_dim, n_labels))
            else:
                self.classifier = create_classifier(z0_dim, n_labels)
            utils.init_network_weights(self.classifier)


    def get_gaussian_likelihood(self, truth, pred_y, mask = None):
        """Per-sample Gaussian log-density, averaged over trajectories.

        pred_y shape [n_traj_samples, n_traj, n_tp, n_dim];
        truth shape [n_traj, n_tp, n_dim]. Returns shape [n_traj_samples].
        """
        n_traj, n_tp, n_dim = truth.size()

        # Compute likelihood of the data under the predictions
        truth_repeated = truth.repeat(pred_y.size(0), 1, 1, 1)

        if mask is not None:
            mask = mask.repeat(pred_y.size(0), 1, 1, 1)
        log_density_data = masked_gaussian_log_density(pred_y, truth_repeated,
            obsrv_std = self.obsrv_std, mask = mask)
        log_density_data = log_density_data.permute(1,0)
        log_density = torch.mean(log_density_data, 1)

        # shape: [n_traj_samples]
        return log_density


    def get_mse(self, truth, pred_y, mask = None):
        """Scalar mean squared error between predictions and truth.

        pred_y shape [n_traj_samples, n_traj, n_tp, n_dim];
        truth shape [n_traj, n_tp, n_dim].
        """
        n_traj, n_tp, n_dim = truth.size()

        # Compute likelihood of the data under the predictions
        truth_repeated = truth.repeat(pred_y.size(0), 1, 1, 1)

        if mask is not None:
            mask = mask.repeat(pred_y.size(0), 1, 1, 1)

        # Compute likelihood of the data under the predictions
        log_density_data = compute_mse(pred_y, truth_repeated, mask = mask)
        # shape: [1]
        return torch.mean(log_density_data)


    def compute_all_losses(self, batch_dict, n_traj_samples = 1, kl_coef = 1.):
        """Condition on observed points, predict all points, and return a
        dict of detached scalar metrics plus the differentiable "loss"
        (an IWAE bound with KL weight kl_coef).
        """
        # Condition on subsampled points
        # Make predictions for all the points
        pred_y, info = self.get_reconstruction(batch_dict["tp_to_predict"],
            batch_dict["observed_data"], batch_dict["observed_tp"],
            mask = batch_dict["observed_mask"], n_traj_samples = n_traj_samples,
            mode = batch_dict["mode"])

        #print("get_reconstruction done -- computing likelihood")
        fp_mu, fp_std, fp_enc = info["first_point"]
        fp_std = fp_std.abs()
        fp_distr = Normal(fp_mu, fp_std)

        assert(torch.sum(fp_std < 0) == 0.)

        kldiv_z0 = kl_divergence(fp_distr, self.z0_prior)

        if torch.isnan(kldiv_z0).any():
            print(fp_mu)
            print(fp_std)
            raise Exception("kldiv_z0 is Nan!")

        # Mean over number of latent dimensions
        # kldiv_z0 shape: [n_traj_samples, n_traj, n_latent_dims] if prior is a mixture of gaussians (KL is estimated)
        # kldiv_z0 shape: [1, n_traj, n_latent_dims] if prior is a standard gaussian (KL is computed exactly)
        # shape after: [n_traj_samples]
        kldiv_z0 = torch.mean(kldiv_z0,(1,2))

        # Compute likelihood of all the points
        rec_likelihood = self.get_gaussian_likelihood(
            batch_dict["data_to_predict"], pred_y,
            mask = batch_dict["mask_predicted_data"])

        mse = self.get_mse(
            batch_dict["data_to_predict"], pred_y,
            mask = batch_dict["mask_predicted_data"])

        pois_log_likelihood = torch.Tensor([0.]).to(get_device(batch_dict["data_to_predict"]))
        if self.use_poisson_proc:
            pois_log_likelihood = compute_poisson_proc_likelihood(
                batch_dict["data_to_predict"], pred_y,
                info, mask = batch_dict["mask_predicted_data"])
            # Take mean over n_traj
            pois_log_likelihood = torch.mean(pois_log_likelihood, 1)

        ################################
        # Compute CE loss for binary classification on Physionet
        device = get_device(batch_dict["data_to_predict"])
        ce_loss = torch.Tensor([0.]).to(device)
        if (batch_dict["labels"] is not None) and self.use_binary_classif:
            # Single label per trajectory -> binary CE; otherwise multiclass.
            if (batch_dict["labels"].size(-1) == 1) or (len(batch_dict["labels"].size()) == 1):
                ce_loss = compute_binary_CE_loss(
                    info["label_predictions"],
                    batch_dict["labels"])
            else:
                ce_loss = compute_multiclass_CE_loss(
                    info["label_predictions"],
                    batch_dict["labels"],
                    mask = batch_dict["mask_predicted_data"])

        # IWAE loss; falls back to the plain ELBO mean if logsumexp is NaN.
        loss = - torch.logsumexp(rec_likelihood - kl_coef * kldiv_z0,0)
        if torch.isnan(loss):
            loss = - torch.mean(rec_likelihood - kl_coef * kldiv_z0,0)

        if self.use_poisson_proc:
            loss = loss - 0.1 * pois_log_likelihood

        if self.use_binary_classif:
            # NOTE: without train_classif_w_reconstr the reconstruction loss
            # is discarded and training optimizes CE only.
            if self.train_classif_w_reconstr:
                loss = loss +  ce_loss * 100
            else:
                loss =  ce_loss

        results = {}
        results["loss"] = torch.mean(loss)
        results["likelihood"] = torch.mean(rec_likelihood).detach()
        results["mse"] = torch.mean(mse).detach()
        results["pois_likelihood"] = torch.mean(pois_log_likelihood).detach()
        results["ce_loss"] = torch.mean(ce_loss).detach()
        results["kl_first_p"] =  torch.mean(kldiv_z0).detach()
        results["std_first_p"] = torch.mean(fp_std).detach()

        if batch_dict["labels"] is not None and self.use_binary_classif:
            results["label_predictions"] = info["label_predictions"].detach()

        return results
| 11,032 | 31.072674 | 112 | py |
steer | steer-master/latent_ode/lib/parse_datasets.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import numpy as np
import torch
import torch.nn as nn
import lib.utils as utils
from lib.diffeq_solver import DiffeqSolver
from generate_timeseries import Periodic_1d
from torch.distributions import uniform
from torch.utils.data import DataLoader
from mujoco_physics import HopperPhysics
from physionet import PhysioNet, variable_time_collate_fn, get_data_min_max
from person_activity import PersonActivity, variable_time_collate_fn_activity
from sklearn import model_selection
import random
#####################################################################################################
def parse_datasets(args, device):
    """Build train/test dataloaders for the dataset named in args.dataset.

    Supported: "hopper" (MuJoCo), "physionet", "activity", "periodic".
    Returns a dict with "train_dataloader"/"test_dataloader" (wrapped as
    infinite generators), "input_dim", batch counts, and dataset-specific
    extras (attr names, n_labels, classif_per_tp).
    """

    def basic_collate_fn(batch, time_steps, args = args, device = device, data_type = "train"):
        # Collate fixed-grid trajectories and split each batch into
        # observed / to-predict parts (interpolation or extrapolation).
        batch = torch.stack(batch)
        data_dict = {
            "data": batch,
            "time_steps": time_steps}

        data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)
        return data_dict


    dataset_name = args.dataset

    n_total_tp = args.timepoints + args.extrap
    max_t_extrap = args.max_t / args.timepoints * n_total_tp

    ##################################################################
    # MuJoCo dataset
    if dataset_name == "hopper":
        dataset_obj = HopperPhysics(root='data', download=True, generate=False, device = device)
        dataset = dataset_obj.get_dataset()[:args.n]
        dataset = dataset.to(device)

        n_tp_data = dataset[:].shape[1]

        # Time steps that are used later on for extrapolation
        time_steps = torch.arange(start=0, end = n_tp_data, step=1).float().to(device)
        time_steps = time_steps / len(time_steps)

        # NOTE(review): dataset was already moved to `device` above; this
        # second .to(device) is a harmless duplicate.
        dataset = dataset.to(device)
        time_steps = time_steps.to(device)

        if not args.extrap:
            # Creating dataset for interpolation
            # sample time points from different parts of the timeline,
            # so that the model learns from different parts of hopper trajectory
            n_traj = len(dataset)
            n_tp_data = dataset.shape[1]
            n_reduced_tp = args.timepoints

            # sample time points from different parts of the timeline,
            # so that the model learns from different parts of hopper trajectory
            start_ind = np.random.randint(0, high=n_tp_data - n_reduced_tp +1, size=n_traj)
            end_ind = start_ind + n_reduced_tp
            sliced = []
            for i in range(n_traj):
                  sliced.append(dataset[i, start_ind[i] : end_ind[i], :])
            dataset = torch.stack(sliced).to(device)
            time_steps = time_steps[:n_reduced_tp]

        # Split into train and test by the time sequences
        train_y, test_y = utils.split_train_test(dataset, train_fraq = 0.8)

        n_samples = len(dataset)
        input_dim = dataset.size(-1)

        batch_size = min(args.batch_size, args.n)
        train_dataloader = DataLoader(train_y, batch_size = batch_size, shuffle=False,
            collate_fn= lambda batch: basic_collate_fn(batch, time_steps, data_type = "train"))
        test_dataloader = DataLoader(test_y, batch_size = n_samples, shuffle=False,
            collate_fn= lambda batch: basic_collate_fn(batch, time_steps, data_type = "test"))

        data_objects = {"dataset_obj": dataset_obj,
                    "train_dataloader": utils.inf_generator(train_dataloader),
                    "test_dataloader": utils.inf_generator(test_dataloader),
                    "input_dim": input_dim,
                    "n_train_batches": len(train_dataloader),
                    "n_test_batches": len(test_dataloader)}
        return data_objects

    ##################################################################
    # Physionet dataset
    if dataset_name == "physionet":
        train_dataset_obj = PhysioNet('data/physionet', train=True,
                                        quantization = args.quantization,
                                        download=True, n_samples = min(10000, args.n),
                                        device = device)
        # Use custom collate_fn to combine samples with arbitrary time observations.
        # Returns the dataset along with mask and time steps
        test_dataset_obj = PhysioNet('data/physionet', train=False,
                                        quantization = args.quantization,
                                        download=True, n_samples = min(10000, args.n),
                                        device = device)

        # Combine and shuffle samples from physionet Train and physionet Test
        total_dataset = train_dataset_obj[:len(train_dataset_obj)]

        if not args.classif:
            # Concatenate samples from original Train and Test sets
            # Only 'training' physionet samples have labels. Therefore, if we do
            # a classification task, we don't need physionet 'test' samples.
            total_dataset = total_dataset + test_dataset_obj[:len(test_dataset_obj)]

        # Shuffle and split
        train_data, test_data = model_selection.train_test_split(total_dataset, train_size= 0.8,
            random_state = 42, shuffle = True)

        record_id, tt, vals, mask, labels = train_data[0]

        n_samples = len(total_dataset)
        input_dim = vals.size(-1)

        batch_size = min(min(len(train_dataset_obj), args.batch_size), args.n)
        # data_min/data_max are used to normalize features inside collate.
        data_min, data_max = get_data_min_max(total_dataset)

        train_dataloader = DataLoader(train_data, batch_size= batch_size, shuffle=False,
            collate_fn= lambda batch: variable_time_collate_fn(batch, args, device, data_type = "train",
                data_min = data_min, data_max = data_max))
        test_dataloader = DataLoader(test_data, batch_size = n_samples, shuffle=False,
            collate_fn= lambda batch: variable_time_collate_fn(batch, args, device, data_type = "test",
                data_min = data_min, data_max = data_max))

        attr_names = train_dataset_obj.params
        data_objects = {"dataset_obj": train_dataset_obj,
                    "train_dataloader": utils.inf_generator(train_dataloader),
                    "test_dataloader": utils.inf_generator(test_dataloader),
                    "input_dim": input_dim,
                    "n_train_batches": len(train_dataloader),
                    "n_test_batches": len(test_dataloader),
                    "attr": attr_names, #optional
                    "classif_per_tp": False, #optional
                    "n_labels": 1} #optional
        return data_objects

    ##################################################################
    # Human activity dataset
    if dataset_name == "activity":
        n_samples =  min(10000, args.n)
        dataset_obj = PersonActivity('data/PersonActivity',
                            download=True, n_samples =  n_samples, device = device)
        print(dataset_obj)
        # Use custom collate_fn to combine samples with arbitrary time observations.
        # Returns the dataset along with mask and time steps

        # Shuffle and split
        train_data, test_data = model_selection.train_test_split(dataset_obj, train_size= 0.8,
            random_state = 42, shuffle = True)

        # Bootstrap resample (sampling with replacement) within each split.
        train_data = [train_data[i] for i in np.random.choice(len(train_data), len(train_data))]
        test_data = [test_data[i] for i in np.random.choice(len(test_data), len(test_data))]

        record_id, tt, vals, mask, labels = train_data[0]
        input_dim = vals.size(-1)

        batch_size = min(min(len(dataset_obj), args.batch_size), args.n)
        train_dataloader = DataLoader(train_data, batch_size= batch_size, shuffle=False,
            collate_fn= lambda batch: variable_time_collate_fn_activity(batch, args, device, data_type = "train"))
        test_dataloader = DataLoader(test_data, batch_size=n_samples, shuffle=False,
            collate_fn= lambda batch: variable_time_collate_fn_activity(batch, args, device, data_type = "test"))

        data_objects = {"dataset_obj": dataset_obj,
                    "train_dataloader": utils.inf_generator(train_dataloader),
                    "test_dataloader": utils.inf_generator(test_dataloader),
                    "input_dim": input_dim,
                    "n_train_batches": len(train_dataloader),
                    "n_test_batches": len(test_dataloader),
                    "classif_per_tp": True, #optional
                    "n_labels": labels.size(-1)}

        return data_objects

    ########### 1d datasets ###########

    # Sampling args.timepoints time points in the interval [0, args.max_t]
    # Sample points for both training sequence and extrapolation (test)
    distribution = uniform.Uniform(torch.Tensor([0.0]),torch.Tensor([max_t_extrap]))
    time_steps_extrap =  distribution.sample(torch.Size([n_total_tp-1]))[:,0]
    time_steps_extrap = torch.cat((torch.Tensor([0.0]), time_steps_extrap))
    time_steps_extrap = torch.sort(time_steps_extrap)[0]

    dataset_obj = None
    ##################################################################
    # Sample a periodic function
    if dataset_name == "periodic":
        dataset_obj = Periodic_1d(
            init_freq = None, init_amplitude = 1.,
            final_amplitude = 1., final_freq = None,
            z0 = 1.)

    ##################################################################

    if dataset_obj is None:
        raise Exception("Unknown dataset: {}".format(dataset_name))

    dataset = dataset_obj.sample_traj(time_steps_extrap, n_samples = args.n,
        noise_weight = args.noise_weight)

    # Process small datasets
    dataset = dataset.to(device)
    time_steps_extrap = time_steps_extrap.to(device)

    train_y, test_y = utils.split_train_test(dataset, train_fraq = 0.8)

    n_samples = len(dataset)
    input_dim = dataset.size(-1)

    batch_size = min(args.batch_size, args.n)
    train_dataloader = DataLoader(train_y, batch_size = batch_size, shuffle=False,
        collate_fn= lambda batch: basic_collate_fn(batch, time_steps_extrap, data_type = "train"))
    test_dataloader = DataLoader(test_y, batch_size = args.n, shuffle=False,
        collate_fn= lambda batch: basic_collate_fn(batch, time_steps_extrap, data_type = "test"))

    data_objects = {#"dataset_obj": dataset_obj,
                "train_dataloader": utils.inf_generator(train_dataloader),
                "test_dataloader": utils.inf_generator(test_dataloader),
                "input_dim": input_dim,
                "n_train_batches": len(train_dataloader),
                "n_test_batches": len(test_dataloader)}

    return data_objects
| 9,406 | 37.871901 | 135 | py |
steer | steer-master/stiff_ode_experiments/stiff_ode_demo.py | import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=120) #default=1000)
parser.add_argument('--batch_time', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--ntest', type=int, default=10)
parser.add_argument('--n_units', type=int, default=500)
parser.add_argument('--min_length', type=float, default=0.001)
parser.add_argument('--normal_std', type=float, default=0.01)
parser.add_argument('--stiffness_ratio', type=float, default=1000.0)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
parser.add_argument('--version', type=str, choices=['standard','steer','normal'], default='steer')
args = parser.parse_args()
torch.manual_seed(6)
if args.adjoint:
from torchdiffeq import odeint_adjoint as odeint
from torchdiffeq import odeint_adjoint_stochastic_end_v3 as odeint_stochastic_end_v3
from torchdiffeq import odeint_adjoint_stochastic_end_normal as odeint_stochastic_end_normal
else:
from torchdiffeq import odeint_stochastic_end_v3
from torchdiffeq import odeint
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
# Initial condition y(0) = 0; train on t in [0, 15] and evaluate on [0, 25]
# to probe extrapolation beyond the training horizon.
true_y0 = torch.tensor([0.])
t = torch.linspace(0., 15., args.data_size)
test_t = torch.linspace(0., 25., args.data_size)
# NOTE(review): true_A is never used in this script — presumably left over
# from the spiral ODE demo this file was adapted from.
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]])
class Lambda(nn.Module):
    """Closed-form right-hand side of the stiff test ODE.

    Computes dy/dt = k*(3 - y) - 2*k*exp(-t) with k = args.stiffness_ratio,
    which becomes increasingly stiff as k grows.
    """

    def forward(self, t, y):
        t = t.unsqueeze(0)
        k = args.stiffness_ratio
        return -1 * y * k + 3 * k - 2 * k * torch.exp(-1 * t)
with torch.no_grad():
    # Ground-truth trajectories from the closed-form RHS, solved once with a
    # high-quality adaptive solver; no gradients are needed here.
    true_y = odeint(Lambda(), true_y0, t, method='dopri5')
    true_y_test = odeint(Lambda(), true_y0, test_t, method='dopri5')
def get_batch():
    """Sample a minibatch of short trajectory segments for training.

    Returns:
        batch_y0: initial states drawn at random offsets of the true trajectory.
        batch_t: shared relative time grid of length args.batch_time.
        batch_y: stacked true states along each segment,
            shape (batch_time, batch_size, state_dim).
    """
    window = args.data_size - args.batch_time
    starts = np.random.choice(np.arange(window, dtype=np.int64), args.batch_size, replace=False)
    idx = torch.from_numpy(starts)
    batch_y0 = true_y[idx]
    batch_t = t[:args.batch_time]
    segments = [true_y[idx + offset] for offset in range(args.batch_time)]
    return batch_y0, batch_t, torch.stack(segments, dim=0)
def makedirs(dirname):
    """Create `dirname` (including missing parents), tolerating prior existence.

    Uses `exist_ok=True` instead of the original check-then-create pair, which
    avoids the race where the directory appears between the existence test and
    the creation call. Unlike the original, a non-directory already at the
    path now raises FileExistsError instead of being silently accepted.
    """
    os.makedirs(dirname, exist_ok=True)
if args.viz:
    # Only create the frame directory / import matplotlib when plotting is on.
    makedirs('steer')
    import matplotlib.pyplot as plt
def visualize(true_y, pred_y, odefunc, test_t, itr):
    """Plot predicted vs. true trajectory and save frame `itr` under steer/.

    A no-op unless --viz was passed (matplotlib is only imported in that case).
    `odefunc` is unused here; it is kept to match the caller's signature.
    """
    if args.viz:
        plt.clf()
        plt.xlabel('t')
        plt.ylabel('y')
        plt.plot(test_t.numpy(), true_y.numpy()[:, 0], 'g-', label='True')
        plt.plot(test_t.numpy(), pred_y.numpy()[:, 0], 'b--' , label='Predicted' )
        plt.ylim((-1, 25))
        plt.legend(loc="upper right")
        plt.tight_layout()
        plt.savefig('steer/{:04d}'.format(itr))
        plt.draw()
        plt.pause(0.001)
class ODEFunc(nn.Module):
    """Learned approximation of the ODE right-hand side f(t, y).

    The MLP consumes the pair (t, y) as a two-column input and emits dy/dt
    with one output column per state element.
    """

    def __init__(self):
        super(ODEFunc, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(2, args.n_units),
            nn.Tanh(),
            nn.Linear(args.n_units, args.n_units),
            nn.Tanh(),
            nn.Linear(args.n_units, 1),
        )
        # Small random weights keep the initial vector field near zero.
        for layer in self.net.modules():
            if isinstance(layer, nn.Linear):
                nn.init.normal_(layer.weight, mean=0, std=0.1)
                nn.init.constant_(layer.bias, val=0)

    def forward(self, t, y):
        col_y = y.view(y.size(0), 1)
        col_t = t.unsqueeze(0).view(1, 1).expand_as(col_y)
        out = self.net(torch.cat([col_t, col_y], 1))
        # Single-sample inputs are returned as a scalar, mirroring the shape
        # the solver expects for the unbatched initial condition.
        if col_y.size(0) == 1:
            out = out.squeeze()
        return out
class RunningAverageMeter(object):
    """Tracks an exponential moving average of a scalar metric."""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Clear all state: no observations seen yet."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold a new observation into the running average.

        The first observation seeds the average directly; afterwards the
        average decays with `momentum` toward each new value.
        """
        if self.val is None:
            self.avg = val
        else:
            m = self.momentum
            self.avg = m * self.avg + (1 - m) * val
        self.val = val
if __name__ == '__main__':
    ii = 0  # frame counter for saved visualization images
    func = ODEFunc()
    optimizer = optim.RMSprop(func.parameters(), lr=1e-4)
    end = time.time()
    time_meter = RunningAverageMeter(0.97)
    loss_meter = RunningAverageMeter(0.97)
    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()
        batch_y0, batch_t, batch_y = get_batch()
        # Select the integration scheme: plain odeint, STEER (uniformly
        # perturbed end time), or Normal-perturbed end time.
        if args.version=='standard':
            pred_y = odeint(func, batch_y0, batch_t)
        elif args.version=='steer':
            pred_y = odeint_stochastic_end_v3(func, batch_y0, batch_t,min_length=args.min_length,mode='train')
        elif args.version=='normal':
            pred_y = odeint_stochastic_end_normal(func, batch_y0, batch_t,std=args.normal_std,mode='train')
        # L1 loss between predicted and ground-truth trajectory segments.
        loss = torch.mean(torch.abs(pred_y - batch_y))
        loss.backward()
        optimizer.step()
        time_meter.update(time.time() - end)
        loss_meter.update(loss.item())
        if itr % args.test_freq == 0:
            # Periodic evaluation on the longer extrapolation grid, without
            # gradients and always with the deterministic solver.
            with torch.no_grad():
                pred_y = odeint(func, true_y0, test_t)
                loss = torch.mean(torch.abs(pred_y - true_y_test))
                print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
                visualize(true_y_test, pred_y, func, test_t, ii )
                ii += 1
        end = time.time()
steer | steer-master/torchdiffeq/setup.py | import setuptools
# Packaging metadata for the torchdiffeq fork bundled with this repository.
setuptools.setup(
    name="torchdiffeq",
    version="0.0.1",
    author="Ricky Tian Qi Chen",
    author_email="rtqichen@cs.toronto.edu",
    description="ODE solvers and adjoint sensitivity analysis in PyTorch.",
    url="https://github.com/arnabgho/torchdiffeq",
    packages=['torchdiffeq', 'torchdiffeq._impl'],
    install_requires=['torch>=0.4.1'],
    # BUG FIX: `("...")` without a trailing comma is a plain string, not a
    # tuple; setuptools expects a *list* of classifier strings.
    classifiers=["Programming Language :: Python :: 3"],
)
| 443 | 30.714286 | 75 | py |
steer | steer-master/torchdiffeq/tests/gradient_tests.py | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
    """Return the largest absolute entry of `tensor` as a scalar tensor."""
    return torch.abs(tensor).max()
class TestGradient(unittest.TestCase):
    """Gradient checks: each solver must pass torch.autograd.gradcheck, and
    the adjoint method must reproduce backprop-through-solver gradients."""

    def test_midpoint(self):
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)
        func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='midpoint')
        self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))

    def test_rk4(self):
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)
        func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='rk4')
        self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))

    def test_dopri5(self):
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)
        func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='dopri5')
        self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))

    def test_adams(self):
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)
        func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='adams')
        self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))

    def test_adaptive_heun(self):
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)
        func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='adaptive_heun')
        self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))

    def test_adjoint(self):
        """
        Test against dopri5
        """
        # Reference gradients: backprop directly through the dopri5 solver.
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)
        func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='dopri5')
        ys = func(y0, t_points)
        torch.manual_seed(0)
        gradys = torch.rand_like(ys)
        ys.backward(gradys)
        # reg_y0_grad = y0.grad
        reg_t_grad = t_points.grad
        reg_a_grad = f.a.grad
        reg_b_grad = f.b.grad
        # Fresh problem instance so gradients do not accumulate across runs;
        # the same cotangent `gradys` is reused for a fair comparison.
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)
        func = lambda y0, t_points: torchdiffeq.odeint_adjoint(f, y0, t_points, method='dopri5')
        ys = func(y0, t_points)
        ys.backward(gradys)
        # adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_a_grad = f.a.grad
        adj_b_grad = f.b.grad
        # self.assertLess(max_abs(reg_y0_grad - adj_y0_grad), eps)
        self.assertLess(max_abs(reg_t_grad - adj_t_grad), eps)
        self.assertLess(max_abs(reg_a_grad - adj_a_grad), eps)
        self.assertLess(max_abs(reg_b_grad - adj_b_grad), eps)
class TestCompareAdjointGradient(unittest.TestCase):
    """Adjoint gradients must agree (within tolerance) with gradients obtained
    by backpropagating directly through a dopri5 solve."""

    def problem(self):
        # Cubic 2-D dynamics with a learnable matrix; `unused_module` exists
        # only to verify that unused parameters receive exactly-zero gradients
        # from the adjoint pass.
        class Odefunc(torch.nn.Module):

            def __init__(self):
                super(Odefunc, self).__init__()
                self.A = torch.nn.Parameter(torch.tensor([[-0.1, 2.0], [-2.0, -0.1]]))
                self.unused_module = torch.nn.Linear(2, 5)

            def forward(self, t, y):
                return torch.mm(y**3, self.A)

        y0 = torch.tensor([[2., 0.]]).to(TEST_DEVICE).requires_grad_(True)
        t_points = torch.linspace(0., 25., 10).to(TEST_DEVICE).requires_grad_(True)
        func = Odefunc().to(TEST_DEVICE)
        return func, y0, t_points

    def test_dopri5_adjoint_against_dopri5(self):
        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint_adjoint(func, y0, t_points, method='dopri5')
        gradys = torch.rand_like(ys) * 0.1
        ys.backward(gradys)

        adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_A_grad = func.A.grad
        self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
        self.assertEqual(max_abs(func.unused_module.bias.grad), 0)

        # Fresh problem: rerun with direct backprop and the same cotangent.
        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
        ys.backward(gradys)

        self.assertLess(max_abs(y0.grad - adj_y0_grad), 3e-4)
        self.assertLess(max_abs(t_points.grad - adj_t_grad), 1e-4)
        self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-3)

    def test_adams_adjoint_against_dopri5(self):
        func, y0, t_points = self.problem()
        ys_ = torchdiffeq.odeint_adjoint(func, y0, t_points, method='adams')
        gradys = torch.rand_like(ys_) * 0.1
        ys_.backward(gradys)

        adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_A_grad = func.A.grad
        self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
        self.assertEqual(max_abs(func.unused_module.bias.grad), 0)

        # Looser tolerances: adams vs. dopri5 are different discretizations.
        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
        ys.backward(gradys)

        self.assertLess(max_abs(y0.grad - adj_y0_grad), 5e-2)
        self.assertLess(max_abs(t_points.grad - adj_t_grad), 5e-4)
        self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-2)


if __name__ == '__main__':
    unittest.main()
| 5,019 | 33.14966 | 96 | py |
steer | steer-master/torchdiffeq/tests/api_tests.py | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
    """Return the largest absolute entry of `tensor` as a scalar tensor."""
    return torch.abs(tensor).max()
class TestCollectionState(unittest.TestCase):
    """Solvers and their gradients must handle tuple-valued states, treating
    each tuple element as an independently integrated tensor."""

    def test_dopri5(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        tuple_y0 = (y0, y0)
        tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='dopri5')
        # BUG FIX: the signed maximum of (sol - y) passes vacuously whenever
        # the largest deviation is negative; measure the maximum *absolute*
        # error instead (max_abs is defined above and was previously unused).
        max_error0 = max_abs(sol - tuple_y[0])
        max_error1 = max_abs(sol - tuple_y[1])
        self.assertLess(max_error0, eps)
        self.assertLess(max_error1, eps)

    def test_dopri5_gradient(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        for i in range(2):
            func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='dopri5')[i]
            self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))

    def test_adams(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        tuple_y0 = (y0, y0)
        tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='adams')
        max_error0 = max_abs(sol - tuple_y[0])  # see BUG FIX note in test_dopri5
        max_error1 = max_abs(sol - tuple_y[1])
        self.assertLess(max_error0, eps)
        self.assertLess(max_error1, eps)

    def test_adams_gradient(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        for i in range(2):
            func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='adams')[i]
            self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))

    def test_adaptive_heun(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        tuple_y0 = (y0, y0)
        tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='adaptive_heun')
        max_error0 = max_abs(sol - tuple_y[0])  # see BUG FIX note in test_dopri5
        max_error1 = max_abs(sol - tuple_y[1])
        self.assertLess(max_error0, eps)
        self.assertLess(max_error1, eps)

    def test_adaptive_heun_gradient(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        for i in range(2):
            func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='adaptive_heun')[i]
            self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))


if __name__ == '__main__':
    unittest.main()
| 2,805 | 32.011765 | 114 | py |
steer | steer-master/torchdiffeq/tests/run_all.py | import unittest
from api_tests import *
from gradient_tests import *
from odeint_tests import *
if __name__ == '__main__':
    # Aggregate runner: the wildcard imports above pull every TestCase into
    # this module's globals, where unittest discovers them.
    unittest.main()
| 144 | 17.125 | 28 | py |
steer | steer-master/torchdiffeq/tests/odeint_tests.py | import unittest
import torch
import torchdiffeq
import problems
error_tol = 1e-4
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
    """Return the largest absolute entry of `tensor` as a scalar tensor."""
    return torch.abs(tensor).max()
def rel_error(true, estimate):
    """Return the largest element-wise relative error of `estimate` vs `true`."""
    ratio = (true - estimate) / true
    return torch.max(torch.abs(ratio))
class TestSolverError(unittest.TestCase):
    """Forward-time integration must match the analytic solution to error_tol."""

    def test_euler(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
        y = torchdiffeq.odeint(f, y0, t_points, method='euler')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_midpoint(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
        y = torchdiffeq.odeint(f, y0, t_points, method='midpoint')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_rk4(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
        y = torchdiffeq.odeint(f, y0, t_points, method='rk4')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_explicit_adams(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
        y = torchdiffeq.odeint(f, y0, t_points, method='explicit_adams')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_adams(self):
        for ode in problems.PROBLEMS.keys():
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode)
            y = torchdiffeq.odeint(f, y0, t_points, method='adams')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)

    def test_dopri5(self):
        for ode in problems.PROBLEMS.keys():
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode)
            y = torchdiffeq.odeint(f, y0, t_points, method='dopri5')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)

    def test_adaptive_heun(self):
        for ode in problems.PROBLEMS.keys():
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode)
            y = torchdiffeq.odeint(f, y0, t_points, method='adaptive_heun')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)

    def test_adjoint(self):
        for ode in problems.PROBLEMS.keys():
            # BUG FIX: `ode` was never forwarded, so every loop iteration
            # re-tested the default 'constant' problem; `reverse=True` had
            # also leaked in from the backwards-time test class even though
            # this class exercises forward time.
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode)
            y = torchdiffeq.odeint_adjoint(f, y0, t_points, method='dopri5')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)
class TestSolverBackwardsInTimeError(unittest.TestCase):
    """Solvers must also match the analytic solution when the time grid is
    reversed (integration backwards in time)."""

    def test_euler(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points, method='euler')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_midpoint(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points, method='midpoint')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_rk4(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points, method='rk4')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_explicit_adams(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points, method='explicit_adams')
        self.assertLess(rel_error(sol, y), error_tol)

    def test_adams(self):
        for ode in problems.PROBLEMS.keys():
            # BUG FIX: `ode` was never forwarded, so each iteration of these
            # loops re-tested the default 'constant' problem.
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
            y = torchdiffeq.odeint(f, y0, t_points, method='adams')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)

    def test_dopri5(self):
        for ode in problems.PROBLEMS.keys():
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
            y = torchdiffeq.odeint(f, y0, t_points, method='dopri5')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)

    def test_adaptive_heun(self):
        for ode in problems.PROBLEMS.keys():
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
            y = torchdiffeq.odeint(f, y0, t_points, method='adaptive_heun')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)

    def test_adjoint(self):
        for ode in problems.PROBLEMS.keys():
            f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
            y = torchdiffeq.odeint_adjoint(f, y0, t_points, method='dopri5')
            with self.subTest(ode=ode):
                self.assertLess(rel_error(sol, y), error_tol)
class TestNoIntegration(unittest.TestCase):
    """With a single time point there is nothing to integrate: every solver
    must return the initial value unchanged."""

    def test_midpoint(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points[0:1], method='midpoint')
        self.assertLess(max_abs(sol[0] - y), error_tol)

    def test_rk4(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points[0:1], method='rk4')
        self.assertLess(max_abs(sol[0] - y), error_tol)

    def test_explicit_adams(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points[0:1], method='explicit_adams')
        self.assertLess(max_abs(sol[0] - y), error_tol)

    def test_adams(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points[0:1], method='adams')
        self.assertLess(max_abs(sol[0] - y), error_tol)

    def test_dopri5(self):
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points[0:1], method='dopri5')
        self.assertLess(max_abs(sol[0] - y), error_tol)

    def test_adaptive_heun(self):
        # BUG FIX: this method was also named `test_dopri5`, which silently
        # shadowed the real dopri5 test above; it exercises adaptive_heun.
        f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
        y = torchdiffeq.odeint(f, y0, t_points[0:1], method='adaptive_heun')
        self.assertLess(max_abs(sol[0] - y), error_tol)


if __name__ == '__main__':
    unittest.main()
| 6,526 | 35.875706 | 88 | py |
steer | steer-master/torchdiffeq/tests/problems.py | import math
import numpy as np
import scipy.linalg
import torch
class ConstantODE(torch.nn.Module):
    """Test ODE whose exact solution is the line y(t) = a*t + b.

    The derivative field is a + (y - y_exact(t))^5: on the true trajectory it
    reduces to the constant slope `a`, while the quintic term pulls deviations
    into the error estimate. `a` and `b` are Parameters so gradient tests can
    differentiate through them.
    """

    def __init__(self, device):
        super(ConstantODE, self).__init__()
        self.a = torch.nn.Parameter(torch.tensor(0.2).to(device))
        self.b = torch.nn.Parameter(torch.tensor(3.0).to(device))

    def forward(self, t, y):
        deviation = y - (self.a * t + self.b)
        return self.a + deviation ** 5

    def y_exact(self, t):
        """Analytic solution a*t + b."""
        return self.a * t + self.b
class SineODE(torch.nn.Module):
    """Test ODE with an oscillatory, time-dependent right-hand side:
    dy/dt = 2*y/t + t^4*sin(2t) - t^2 + 4*t^3.

    Used on t in [1, 8]; note t = 0 is a singularity of the 2*y/t term.
    """

    def __init__(self, device):
        super(SineODE, self).__init__()

    def forward(self, t, y):
        return 2 * y / t + t**4 * torch.sin(2 * t) - t**2 + 4 * t**3

    def y_exact(self, t):
        # Closed-form solution of the ODE above; the (pi - 0.25) term is the
        # chosen integration constant (presumably matched to the reference
        # setup in construct_problem — the solver tests only need *a* fixed
        # particular solution).
        return -0.5 * t**4 * torch.cos(2 * t) + 0.5 * t**3 * torch.sin(2 * t) + 0.25 * t**2 * torch.cos(
            2 * t
        ) - t**3 + 2 * t**4 + (math.pi - 0.25) * t**2
class LinearODE(torch.nn.Module):
    """Linear system dy/dt = A y with a random matrix fixed at construction.

    `y_exact` computes the reference solution y(t) = expm(A*t) @ y(0) with
    y(0) = ones, via scipy's matrix exponential.
    """

    def __init__(self, device, dim=10):
        super(LinearODE, self).__init__()
        self.dim = dim
        U = torch.randn(dim, dim).to(device) * 0.1
        # 2U - (U + U^T) = U - U^T: skew-symmetric, so eigenvalues are purely
        # imaginary and trajectories neither blow up nor decay.
        A = 2 * U - (U + U.transpose(0, 1))
        self.A = torch.nn.Parameter(A)
        self.initial_val = np.ones((dim, 1))

    def forward(self, t, y):
        return torch.mm(self.A, y.reshape(self.dim, 1)).reshape(-1)

    def y_exact(self, t):
        # Reference solution computed on CPU/numpy and re-wrapped as tensors,
        # so it is not differentiable w.r.t. A or t.
        t = t.detach().cpu().numpy()
        A_np = self.A.detach().cpu().numpy()
        ans = []
        for t_i in t:
            ans.append(np.matmul(scipy.linalg.expm(A_np * t_i), self.initial_val))
        return torch.stack([torch.tensor(ans_) for ans_ in ans]).reshape(len(t), self.dim)
# Registry mapping problem names to their ODE classes; keyed by the `ode`
# argument of construct_problem below.
PROBLEMS = {'constant': ConstantODE, 'linear': LinearODE, 'sine': SineODE}
def construct_problem(device, npts=10, ode='constant', reverse=False):
    """Build one reference problem for the solver tests.

    Args:
        device: Device on which tensors are allocated.
        npts: Number of time points on the interval [1, 8].
        ode: Key into PROBLEMS selecting the test equation.
        reverse: If True, flip the grid and solution so time runs backwards.

    Returns:
        Tuple (f, y0, t_points, sol): the ODE module, the initial value
        (detached), the time grid (requires grad), and the exact solution.
    """
    f = PROBLEMS[ode](device)
    t_points = torch.linspace(1, 8, npts).to(device).requires_grad_(True)
    sol = f.y_exact(t_points)

    def _flip0(x):
        # Reverse a tensor along its leading axis via an explicit index.
        order = torch.arange(x.size(0) - 1, -1, -1, dtype=torch.long, device=x.device)
        return x[order]

    if reverse:
        t_points = _flip0(t_points).clone().detach()
        sol = _flip0(sol).clone().detach()

    return f, sol[0].detach(), t_points, sol
if __name__ == '__main__':
    # Quick visual sanity check: plot the analytic solution of SineODE.
    f = SineODE('cpu')
    t_points = torch.linspace(1, 8, 100).to('cpu').requires_grad_(True)
    sol = f.y_exact(t_points)
    import matplotlib.pyplot as plt
    plt.plot(t_points.detach().cpu().numpy(), sol.detach().cpu().numpy())
    plt.show()
| 2,533 | 28.126437 | 104 | py |
steer | steer-master/torchdiffeq/tests/DETEST/run.py | import time
import numpy as np
from scipy.stats.mstats import gmean
import torch
from torchdiffeq import odeint
import detest
torch.set_default_tensor_type(torch.DoubleTensor)
class NFEDiffEq:
    """Callable wrapper that counts the number of function evaluations (NFE)
    made on a dynamics function."""

    def __init__(self, diffeq):
        self.diffeq = diffeq
        self.nfe = 0

    def __call__(self, t, y):
        # Each solver call to the dynamics bumps the counter before delegating.
        self.nfe = self.nfe + 1
        return self.diffeq(t, y)
def main():
    """Benchmark dopri5 and adams on the 25 DETEST problems at three
    tolerances, reporting per-problem NFE, wall time, and RMS error against a
    tight-tolerance dopri5 reference solution (cached in `sol`)."""
    sol = dict()
    for method in ['dopri5', 'adams']:
        for tol in [1e-3, 1e-6, 1e-9]:
            print('======= {} | tol={:e} ======='.format(method, tol))
            nfes = []
            times = []
            errs = []
            # Problems are named A1..E5: five classes of five problems each.
            for c in ['A', 'B', 'C', 'D', 'E']:
                for i in ['1', '2', '3', '4', '5']:
                    diffeq, init, _ = getattr(detest, c + i)()
                    t0, y0 = init()
                    diffeq = NFEDiffEq(diffeq)

                    if not c + i in sol:
                        # Reference solution at t=20, solved once per problem
                        # with very tight tolerances; reset NFE afterwards so
                        # the reference solve is not counted.
                        sol[c + i] = odeint(
                            diffeq, y0, torch.stack([t0, torch.tensor(20.)]), atol=1e-12, rtol=1e-12, method='dopri5'
                        )[1]
                        diffeq.nfe = 0

                    start_time = time.time()
                    est = odeint(diffeq, y0, torch.stack([t0, torch.tensor(20.)]), atol=tol, rtol=tol, method=method)
                    time_spent = time.time() - start_time

                    error = torch.sqrt(torch.mean((sol[c + i] - est[1])**2))

                    errs.append(error.item())
                    nfes.append(diffeq.nfe)
                    times.append(time_spent)

                    print('{}: NFE {} | Time {} | Err {:e}'.format(c + i, diffeq.nfe, time_spent, error.item()))

            print('Total NFE {} | Total Time {} | GeomAvg Error {:e}'.format(np.sum(nfes), np.sum(times), gmean(errs)))


if __name__ == '__main__':
    main()
| 1,843 | 29.733333 | 119 | py |
steer | steer-master/torchdiffeq/tests/DETEST/detest.py | import math
import torch
####################################
# Problem Class A. Single equations.
####################################
def A1():
    """DETEST A1: exponential decay y' = -y, y(0) = 1; solution exp(-t)."""
    def diffeq(t, y):
        return -y

    def init():
        return torch.tensor(0.), torch.tensor(1.)

    def solution(t):
        return torch.exp(-t)

    return diffeq, init, solution
def A2():
    """DETEST A2: y' = -y^3/2, y(0) = 1; solution 1/sqrt(t + 1)."""
    diffeq = lambda t, y: -y**3 / 2
    init = lambda: (torch.tensor(0.), torch.tensor(1.))
    solution = lambda t: 1 / torch.sqrt(t + 1)
    return diffeq, init, solution


def A3():
    """DETEST A3: y' = y*cos(t), y(0) = 1; solution exp(sin(t))."""
    diffeq = lambda t, y: y * torch.cos(t)
    init = lambda: (torch.tensor(0.), torch.tensor(1.))
    solution = lambda t: torch.exp(torch.sin(t))
    return diffeq, init, solution


def A4():
    """DETEST A4: logistic growth y' = y/4 * (1 - y/20); carrying capacity 20."""
    diffeq = lambda t, y: y / 4 * (1 - y / 20)
    init = lambda: (torch.tensor(0.), torch.tensor(1.))
    solution = lambda t: 20 / (1 + 19 * torch.exp(-t / 4))
    return diffeq, init, solution


def A5():
    """DETEST A5: y' = (y - t)/(y + t); no closed-form solution supplied."""
    diffeq = lambda t, y: (y - t) / (y + t)
    init = lambda: (torch.tensor(0.), torch.tensor(4.))
    return diffeq, init, None
#################################
# Problem Class B. Small systems.
#################################
def B1():
    """DETEST B1: 2-D nonlinear system (two interacting populations);
    no closed-form solution supplied."""

    def diffeq(t, y):
        dy0 = 2 * (y[0] - y[0] * y[1])
        dy1 = -(y[1] - y[0] * y[1])
        return torch.stack([dy0, dy1])

    def init():
        return torch.tensor(0.), torch.tensor([1., 3.])

    return diffeq, init, None


def B2():
    """DETEST B2: 3-D linear system dy = A y with a fixed tridiagonal A."""
    A = torch.tensor([[-1., 1., 0.], [1., -2., 1.], [0., 1., -1.]])

    def diffeq(t, y):
        dy = torch.mv(A, y)
        return dy

    def init():
        return torch.tensor(0.), torch.tensor([2., 0., 1.])

    return diffeq, init, None


def B3():
    """DETEST B3: 3-D nonlinear cascade; note dy0 + dy1 + dy2 = 0, so the
    total of the components is conserved."""

    def diffeq(t, y):
        dy0 = -y[0]
        dy1 = y[0] - y[1] * y[1]
        dy2 = y[1] * y[1]
        return torch.stack([dy0, dy1, dy2])

    def init():
        return torch.tensor(0.), torch.tensor([1., 0., 0.])

    return diffeq, init, None


def B4():
    """DETEST B4: 3-D nonlinear system with a 1/|(y0, y1)| coupling term."""

    def diffeq(t, y):
        a = torch.sqrt(y[0] * y[0] + y[1] * y[1])
        dy0 = -y[1] - y[0] * y[2] / a
        dy1 = y[0] - y[1] * y[2] / a
        dy2 = y[0] / a
        return torch.stack([dy0, dy1, dy2])

    def init():
        return torch.tensor(0.), torch.tensor([3., 0., 0.])

    return diffeq, init, None


def B5():
    """DETEST B5: torque-free Euler rigid-body equations (quadratic couplings
    of the angular-velocity components)."""

    def diffeq(t, y):
        dy0 = y[1] * y[2]
        dy1 = -y[0] * y[2]
        dy2 = -0.51 * y[0] * y[1]
        return torch.stack([dy0, dy1, dy2])

    def init():
        return torch.tensor(0.), torch.tensor([0., 1., 1.])

    return diffeq, init, None
####################################
# Problem Class C. Moderate systems.
####################################
def C1():
    """DETEST C1: 10-D linear transfer chain dy = A y.

    Writing into the flattened matrix with stride n+1 = 11 hits the main
    diagonal (all but the last entry get -1); starting at offset 10 hits the
    first subdiagonal (1s), so each component feeds the next and the final
    component only accumulates.
    """
    A = torch.zeros(10, 10)
    A.view(-1)[:-1:11] = -1
    A.view(-1)[10::11] = 1

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(10)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None


def C2():
    """DETEST C2: like C1 but with increasing rates 1..9 along the chain
    (diagonal -1..-9, subdiagonal 1..9)."""
    A = torch.zeros(10, 10)
    A.view(-1)[:-1:11] = torch.linspace(-1, -9, 9)
    A.view(-1)[10::11] = torch.linspace(1, 9, 9)

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(10)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None


def C3():
    """DETEST C3: 10-D tridiagonal system (-2 on the diagonal, 1 on both
    off-diagonals — a discrete 1-D Laplacian)."""
    n = 10
    A = torch.zeros(n, n)
    A.view(-1)[::n + 1] = -2
    A.view(-1)[n::n + 1] = 1
    A.view(-1)[1::n + 1] = 1

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(n)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None


def C4():
    """DETEST C4: same discrete-Laplacian system as C3, at dimension 51."""
    n = 51
    A = torch.zeros(n, n)
    A.view(-1)[::n + 1] = -2
    A.view(-1)[n::n + 1] = 1
    A.view(-1)[1::n + 1] = 1

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(n)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None
def C5():
    """DETEST C5: five-body gravitational motion (the five outer bodies of the
    solar system about the sun).

    State is a (2, 3, 5) tensor: y[0] holds positions (3 coordinates for each
    of 5 bodies), y[1] holds velocities.
    """
    k2 = torch.tensor(2.95912208286)   # gravitational constant in these units
    m0 = torch.tensor(1.00000597682)   # central mass
    m = torch.tensor([
        0.000954786104043,
        0.000285583733151,
        0.0000437273164546,
        0.0000517759138449,
        0.00000277777777778,
    ]).view(1, 5)                      # masses of the five bodies

    def diffeq(t, y):
        # y is 2 x 3 x 5
        # y[0] contains positions, y[1] contains velocities.
        # second axis indexes space (x,y,z).
        # third axis indexes 5 bodies.
        dy = y[1, :, :]
        y = y[0]
        r = torch.sqrt(torch.sum(y**2, 0)).view(1, 5)      # distance to the sun
        d = torch.sqrt(torch.sum((y[:, :, None] - y[:, None, :])**2, 0))  # pairwise distances
        F = m.view(1, 1, 5) * ((y[:, None, :] - y[:, :, None]) / (d * d * d).view(1, 5, 5) + y.view(3, 1, 5) /
                               (r * r * r).view(1, 1, 5))
        # Zero the diagonal (self-interaction terms are 0/0 = nan above).
        F.view(3, 5 * 5)[:, ::6] = 0
        ddy = k2 * (-(m0 + m) * y / (r * r * r)) + F.sum(2)
        return torch.stack([dy, ddy], 0)

    def init():
        # BUG FIX: the 11th position coordinate read 165699966404 — a dropped
        # decimal point; every neighboring coordinate is of order 1-30, and
        # the value should be 1.65699966404.
        y0 = torch.tensor([
            3.42947415189, 3.35386959711, 1.35494901715, 6.64145542550, 5.97156957878, 2.18231499728, 11.2630437207,
            14.6952576794, 6.27960525067, -30.1552268759, 1.65699966404, 1.43785752721, -21.1238353380, 28.4465098142,
            15.388265967
        ]).view(5, 3).transpose(0, 1)
        dy0 = torch.tensor([
            -.557160570446, .505696783289, .230578543901, -.415570776342, .365682722812, .169143213293, -.325325669158,
            .189706021964, .0877265322780, -.0240476254170, -.287659532608, -.117219543175, -.176860753121,
            -.216393453025, -.0148647893090
        ]).view(5, 3).transpose(0, 1)
        return torch.tensor(0.), torch.stack([y0, dy0], 0)

    return diffeq, init, None
###################################
# Problem Class D. Orbit equations.
###################################
def _DTemplate(eps):
    """Build a DETEST class-D problem: planar two-body (Kepler) orbit with
    eccentricity `eps`.

    State is (x, y, x', y'); the acceleration is the inverse-square central
    force -pos/r^3. The initial condition starts at perihelion with the speed
    that yields the requested eccentricity.
    """

    def diffeq(t, y):
        r = (y[0]**2 + y[1]**2)**(3 / 2)
        dy0 = y[2]
        dy1 = y[3]
        dy2 = -y[0] / r
        dy3 = -y[1] / r
        return torch.stack([dy0, dy1, dy2, dy3])

    def init():
        return torch.tensor(0.), torch.tensor([1 - eps, 0, 0, math.sqrt((1 + eps) / (1 - eps))])

    return diffeq, init, None


# Orbits of increasing eccentricity (harder for adaptive step control).
D1 = lambda: _DTemplate(0.1)
D2 = lambda: _DTemplate(0.3)
D3 = lambda: _DTemplate(0.5)
D4 = lambda: _DTemplate(0.7)
D5 = lambda: _DTemplate(0.9)
##########################################
# Problem Class E. Higher order equations.
##########################################
def E1():
    """DETEST E1: linear second-order ODE with time-dependent coefficients;
    state is (y, y')."""

    def diffeq(t, y):
        dy0 = y[1]
        dy1 = -(y[1] / (t + 1) + (1 - 0.25 / (t + 1)**2) * y[0])
        return torch.stack([dy0, dy1])

    def init():
        return torch.tensor(0.), torch.tensor([.671396707141803, .0954005144474744])

    return diffeq, init, None


def E2():
    """DETEST E2: van der Pol oscillator, y'' = (1 - y^2) y' - y."""

    def diffeq(t, y):
        dy0 = y[1]
        dy1 = (1 - y[0]**2) * y[1] - y[0]
        return torch.stack([dy0, dy1])

    def init():
        return torch.tensor(0.), torch.tensor([2., 0.])

    return diffeq, init, None


def E3():
    """DETEST E3: forced nonlinear oscillator, y'' = y^3/6 - y + 2 sin(2.78535 t)."""

    def diffeq(t, y):
        dy0 = y[1]
        dy1 = y[0]**3 / 6 - y[0] + 2 * torch.sin(2.78535 * t)
        return torch.stack([dy0, dy1])

    def init():
        return torch.tensor(0.), torch.tensor([0., 0.])

    return diffeq, init, None


def E4():
    """DETEST E4: y'' = 0.32 - 0.4 (y')^2 (quadratic damping toward a
    terminal velocity)."""

    def diffeq(t, y):
        dy0 = y[1]
        dy1 = .32 - .4 * y[1]**2
        return torch.stack([dy0, dy1])

    def init():
        return torch.tensor(0.), torch.tensor([30., 0.])

    return diffeq, init, None


def E5():
    """DETEST E5: y'' = sqrt(1 + y'^2) / (25 - t); singular as t -> 25
    (the benchmark only integrates to t = 20)."""

    def diffeq(t, y):
        dy0 = y[1]
        dy1 = torch.sqrt(1 + y[1]**2) / (25 - t)
        return torch.stack([dy0, dy1])

    def init():
        return torch.tensor(0.), torch.tensor([0., 0.])

    return diffeq, init, None
###################
# Helper functions.
###################
def _to_tensor(x):
if not torch.is_tensor(x):
x = torch.tensor(x)
return x
| 7,740 | 22.107463 | 119 | py |
steer | steer-master/torchdiffeq/torchdiffeq/__init__.py | from ._impl import odeint
from ._impl import odeint_adjoint
from ._impl import odeint_skip_step
from ._impl import odeint_stochastic_end
from ._impl import odeint_stochastic_end_v2
from ._impl import odeint_stochastic_end_v3
from ._impl import odeint_adjoint_skip_step
from ._impl import odeint_adjoint_stochastic_end
from ._impl import odeint_adjoint_stochastic_end_v2
from ._impl import odeint_adjoint_stochastic_end_v3
from ._impl import odeint_stochastic_end_v2_inference
| 476 | 38.75 | 53 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_normal.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
from torch.distributions import uniform
# Registry of solver names -> solver classes accepted via the `method` option.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_normal(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion = 0.5, shrink_std = 0.02, method=None, options=None, mode='train',std = 0.001 ):
    """Integrate an ODE over a two-point interval whose end time is randomly
    perturbed (temporal regularization, Normal variant).

    For a two-element time tensor ``actual_t = [t0, t1]`` the upper limit is
    replaced by a sample from ``Normal(t1, std)`` before calling ``odeint``.
    For any other number of time points this degenerates to a plain ``odeint``
    over ``actual_t``.

    Args:
        func: Callable mapping ``(t, y)`` to ``dy/dt``.
        y0: Tensor (or tuple of tensors) giving the state at ``actual_t[0]``.
        actual_t: 1-D tensor of time points; the perturbation applies only
            when it has exactly two elements.
        rtol, atol: Solver error tolerances, forwarded to ``odeint``.
        shrink_proportion, shrink_std: Unused; retained for interface
            compatibility with the other stochastic-end variants.
        method: Optional solver name, forwarded to ``odeint``.
        options: Optional solver-options dict, forwarded to ``odeint``.
        mode: When 'train' and time runs backwards, the perturbed interval is
            flipped back to its original (descending) orientation.
        std: Standard deviation of the Normal perturbation of the end time.

    Returns:
        Tensor of solution values at the (possibly perturbed) time points.
    """
    t = actual_t.clone()
    if t.size(0) != 2:
        # Not a single interval: integrate the requested grid directly.
        # BUG FIX: rtol/atol/method/options used to be silently dropped here.
        return odeint(func, y0, actual_t, rtol=rtol, atol=atol, method=method, options=options)
    # NOTE(review): Tensor.type_as returns `t` itself when the dtype already
    # matches y0, so `integration_time` may alias `t`; the in-place writes
    # below then affect both names. Preserved as-is — confirm intended.
    integration_time = t.type_as(y0)
    rev = False
    if t[1] < t[0]:
        # Work with an ascending interval; remember the original orientation.
        t = reverse_time(t)
        rev = True
    m = normal.Normal(t[1], std)
    integration_time[0] = t[0]
    integration_time[1] = m.sample()  # perturbed end point
    # NOTE(review): when mode != 'train' a reversed interval is NOT flipped
    # back, so the caller receives an ascending solve — confirm intended.
    if rev and mode == 'train':
        integration_time = reverse_time(integration_time)
    # BUG FIX: forward the solver configuration instead of ignoring it.
    out = odeint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
    return out
def reverse_time(t):
    """Swap the two endpoints of a 2-element time tensor in place.

    Mutates `t` and also returns it for call-site convenience.
    """
    first, last = t[0].item(), t[1].item()
    t[0] = last
    t[1] = first
    return t
#def odeint_skip_step(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# tensor_input, func, y0, t = _check_inputs(func, y0, t)
#
# if options is None:
# options = {}
# elif method is None:
# raise ValueError('cannot supply `options` without specifying `method`')
#
# if method is None:
# method = 'dopri5'
#
#
# #print("y0")
# #print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# solution = solver.integrate(t)
#
#
# integration_time = t #integration_time.type_as(x)
# range_time = t[1]-t[0]
# skip = range_time * skip_proportion
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
# rand_points[0]=t[0]
# rand_points[num_skips-1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
# print(integration_time)
# print(y0)
#
# print("=======================================")
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# solution = out
# y0 = (out[0][1],)
# for i in range(1,rand_points.shape[0]-1):
# integration_time[0]=rand_points[i] + skip
# integration_time[1]=rand_points[i+1]
#
# if integration_time[1]>integration_time[0]:
# print(integration_time)
# print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# y0 = (out[0][1],)
#
# solution[0][1] = out[0][1]
#
#
# if tensor_input:
# solution = solution[0]
#
#
# return solution
| 7,776 | 35.511737 | 175 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_normal.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
from torch.distributions import normal
from torch.distributions import uniform
# Registry of method-name -> solver-class mappings, mirroring the sibling
# odeint modules.  NOTE(review): nothing in this module's visible code reads
# SOLVERS (the entry point delegates to odeint_adjoint) — presumably kept for
# parity with the other solver modules; confirm before removing.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end_normal(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, shrink_proportion=0.5, shrink_std=0.02, mode='train', std=0.01):
    """Adjoint ODE solve with a stochastically perturbed end time.

    Instead of integrating exactly from ``actual_t[0]`` to ``actual_t[1]``,
    the end point is replaced by a sample from a Normal centred on
    ``actual_t[1]`` with standard deviation ``std`` (an end-time
    regularisation trick).  Gradients flow through the adjoint method.

    Args:
        func: nn.Module mapping (t, y) to dy/dt, as required by odeint_adjoint.
        y0: Tensor (or tuple of Tensors) holding the state at actual_t[0].
        actual_t: 1-D Tensor of time points; it is cloned, never mutated.
            The end-point perturbation is only defined for exactly two
            points — any other length falls back to a plain adjoint solve.
        rtol, atol: solver tolerances, forwarded to odeint_adjoint.
        method, options: solver selection, forwarded to odeint_adjoint.
        shrink_proportion, shrink_std, mode: unused in this body; kept for
            signature compatibility with the sibling stochastic-end variants.
        std: standard deviation of the Normal used to sample the end time.

    Returns:
        Tensor (or tuple of Tensors) of solution values, one per time point,
        with the initial value first, as returned by odeint_adjoint.
    """
    t = actual_t.clone()
    # Consistent with the non-adjoint sibling (odeint_stochastic_end_normal):
    # the end-point perturbation only makes sense for a [start, end] pair.
    if t.size(0) != 2:
        return odeint_adjoint(func, y0, actual_t, rtol=rtol, atol=atol, method=method, options=options)
    if isinstance(y0, tuple):
        integration_time = t.type_as(y0[0])
    else:
        integration_time = t.type_as(y0)
    # Normalise to forward time so the Normal is centred on the true end point.
    rev = False
    if t[1] < t[0]:
        t = reverse_time(t)
        rev = True
    m = normal.Normal(t[1], std)
    integration_time[0] = t[0]
    integration_time[1] = m.sample()
    # Restore the caller's (reversed) orientation before solving.
    # NOTE(review): the non-adjoint sibling only re-reverses when
    # mode == 'train' — confirm which behaviour is intended here.
    if rev:
        integration_time = reverse_time(integration_time)
    out = odeint_adjoint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
    return out
def reverse_time(t):
    # In-place swap of t[0] and t[1]; the (mutated) tensor itself is returned
    # so the call can be used inline.  Only the first two entries are touched.
    t[0], t[1] = t[1].item(), t[0].item()
    return t
| 4,034 | 35.351351 | 181 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
# Maps the user-facing `method` string accepted by odeint() below to the
# solver class that implements it ('dopri5' is the default when no method
# is given).
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None):
    """Integrate a system of ordinary differential equations.

    Solves the initial value problem for a non-stiff system of first order ODEs:
    ```
    dy/dt = func(t, y), y(t[0]) = y0
    ```
    where y is a Tensor of any shape.

    Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.

    Args:
        func: Function that maps a Tensor holding the state `y` and a scalar Tensor
            `t` into a Tensor of state derivatives with respect to time.
        y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
            have any floating point or complex dtype.
        t: 1-D Tensor holding a sequence of time points for which to solve for
            `y`. The initial time point should be the first element of this
            sequence, and each time must be larger than the previous time.
        rtol: optional float64 Tensor specifying an upper bound on relative error,
            per element of `y`.
        atol: optional float64 Tensor specifying an upper bound on absolute error,
            per element of `y`.
        method: optional string naming the integration method (a key of
            SOLVERS); defaults to 'dopri5'.
        options: optional dict of configuring options for the indicated
            integration method. Can only be provided if a `method` is
            explicitly set.

    Returns:
        y: Tensor, where the first dimension corresponds to different
        time points. Contains the solved value of y for each desired time point in
        `t`, with the initial value `y0` being the first element along the first
        dimension.

    Raises:
        ValueError: if an invalid `method` is provided, or if `options` is
            supplied without `method`.
        TypeError: if `t` or `y0` has an invalid dtype.
    """
    # Normalises func/y0 to tuple form; tensor_input records whether to unwrap.
    tensor_input, func, y0, t = _check_inputs(func, y0, t)
    if options is None:
        options = {}
    elif method is None:
        raise ValueError('cannot supply `options` without specifying `method`')
    if method is None:
        method = 'dopri5'
    # The documented contract is ValueError for an unknown method; a raw dict
    # lookup would raise KeyError instead, so validate explicitly.
    if method not in SOLVERS:
        raise ValueError('Invalid method "{}". Must be one of {}.'.format(method, tuple(SOLVERS)))
    solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
    solution = solver.integrate(t)
    if tensor_input:
        solution = solution[0]
    return solution
| 3,113 | 36.518072 | 86 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adjoint.py | import torch
import torch.nn as nn
from . import odeint
from .misc import _flatten, _flatten_convert_none_to_zeros
class OdeintAdjointMethod(torch.autograd.Function):
    """Autograd Function that differentiates an ODE solve with the adjoint
    sensitivity method: the forward solve runs under ``no_grad`` and
    ``backward`` integrates an augmented system backwards in time instead of
    backpropagating through the solver's internal operations."""
    @staticmethod
    def forward(ctx, *args):
        # Flat argument layout (see odeint_adjoint, the only caller):
        #   (*y0, func, t, flat_params, rtol, atol, method, options)
        assert len(args) >= 8, 'Internal error: all arguments required.'
        y0, func, t, flat_params, rtol, atol, method, options = \
            args[:-7], args[-7], args[-6], args[-5], args[-4], args[-3], args[-2], args[-1]
        # Non-tensor arguments are stashed on ctx for use in backward().
        ctx.func, ctx.rtol, ctx.atol, ctx.method, ctx.options = func, rtol, atol, method, options
        # Solve without building an autograd graph; gradients are
        # reconstructed by the adjoint integration in backward().
        with torch.no_grad():
            ans = odeint(func, y0, t, rtol=rtol, atol=atol, method=method, options=options)
        ctx.save_for_backward(t, flat_params, *ans)
        return ans
    @staticmethod
    def backward(ctx, *grad_output):
        # Recover what forward() saved: time grid, flattened parameters, and
        # the solution (one saved tensor per state tensor).
        t, flat_params, *ans = ctx.saved_tensors
        ans = tuple(ans)
        func, rtol, atol, method, options = ctx.func, ctx.rtol, ctx.atol, ctx.method, ctx.options
        n_tensors = len(ans)
        f_params = tuple(func.parameters())
        # TODO: use a nn.Module and call odeint_adjoint to implement higher order derivatives.
        def augmented_dynamics(t, y_aug):
            # Dynamics of the original system augmented with
            # the adjoint wrt y, and an integrator wrt t and args.
            y, adj_y = y_aug[:n_tensors], y_aug[n_tensors:2 * n_tensors]  # Ignore adj_time and adj_params.
            # Re-evaluate func with grad enabled so vector-Jacobian products
            # can be taken with respect to t, y and the parameters.
            with torch.set_grad_enabled(True):
                t = t.to(y[0].device).detach().requires_grad_(True)
                y = tuple(y_.detach().requires_grad_(True) for y_ in y)
                func_eval = func(t, y)
                vjp_t, *vjp_y_and_params = torch.autograd.grad(
                    func_eval, (t,) + y + f_params,
                    tuple(-adj_y_ for adj_y_ in adj_y), allow_unused=True, retain_graph=True
                )
            vjp_y = vjp_y_and_params[:n_tensors]
            vjp_params = vjp_y_and_params[n_tensors:]
            # autograd.grad returns None if no gradient, set to zero.
            vjp_t = torch.zeros_like(t) if vjp_t is None else vjp_t
            vjp_y = tuple(torch.zeros_like(y_) if vjp_y_ is None else vjp_y_ for vjp_y_, y_ in zip(vjp_y, y))
            vjp_params = _flatten_convert_none_to_zeros(vjp_params, f_params)
            # Keep a well-formed scalar placeholder when func has no parameters.
            if len(f_params) == 0:
                vjp_params = torch.tensor(0.).to(vjp_y[0])
            return (*func_eval, *vjp_y, vjp_t, vjp_params)
        T = ans[0].shape[0]
        with torch.no_grad():
            # Initialise the adjoint state from the gradient at the last time point.
            adj_y = tuple(grad_output_[-1] for grad_output_ in grad_output)
            adj_params = torch.zeros_like(flat_params)
            adj_time = torch.tensor(0.).to(t)
            time_vjps = []
            # Sweep the time intervals in reverse, accumulating adjoints.
            for i in range(T - 1, 0, -1):
                ans_i = tuple(ans_[i] for ans_ in ans)
                grad_output_i = tuple(grad_output_[i] for grad_output_ in grad_output)
                func_i = func(t[i], ans_i)
                # Compute the effect of moving the current time measurement point.
                dLd_cur_t = sum(
                    torch.dot(func_i_.reshape(-1), grad_output_i_.reshape(-1)).reshape(1)
                    for func_i_, grad_output_i_ in zip(func_i, grad_output_i)
                )
                adj_time = adj_time - dLd_cur_t
                time_vjps.append(dLd_cur_t)
                # Run the augmented system backwards in time.
                if adj_params.numel() == 0:
                    adj_params = torch.tensor(0.).to(adj_y[0])
                aug_y0 = (*ans_i, *adj_y, adj_time, adj_params)
                aug_ans = odeint(
                    augmented_dynamics, aug_y0,
                    torch.tensor([t[i], t[i - 1]]), rtol=rtol, atol=atol, method=method, options=options
                )
                # Unpack aug_ans.
                adj_y = aug_ans[n_tensors:2 * n_tensors]
                adj_time = aug_ans[2 * n_tensors]
                adj_params = aug_ans[2 * n_tensors + 1]
                # Keep only the value at t[i - 1] (index 1 of the two-point solve).
                adj_y = tuple(adj_y_[1] if len(adj_y_) > 0 else adj_y_ for adj_y_ in adj_y)
                if len(adj_time) > 0: adj_time = adj_time[1]
                if len(adj_params) > 0: adj_params = adj_params[1]
                # Add the direct gradient contribution from the observation at t[i - 1].
                adj_y = tuple(adj_y_ + grad_output_[i - 1] for adj_y_, grad_output_ in zip(adj_y, grad_output))
                del aug_y0, aug_ans
            time_vjps.append(adj_time)
            time_vjps = torch.cat(time_vjps[::-1])
            # One gradient slot per forward input: *y0, func (None), t,
            # flat_params, then Nones for the non-differentiable trailing args.
            return (*adj_y, None, time_vjps, adj_params, None, None, None, None, None)
def odeint_adjoint(func, y0, t, rtol=1e-6, atol=1e-12, method=None, options=None):
    """Solve an ODE with gradients computed via the adjoint method.

    ``func`` must be an ``nn.Module`` so its parameters can be collected and
    threaded through the custom autograd Function.  A bare-tensor ``y0`` is
    transparently wrapped into (and unwrapped from) the tuple form that
    OdeintAdjointMethod expects.
    """
    if not isinstance(func, nn.Module):
        raise ValueError('func is required to be an instance of nn.Module.')
    tensor_input = torch.is_tensor(y0)
    if tensor_input:
        # Normalise the single-tensor problem to tuple form.
        class _TupleWrapper(nn.Module):
            def __init__(self, wrapped):
                super(_TupleWrapper, self).__init__()
                self.wrapped = wrapped
            def forward(self, t, y):
                return (self.wrapped(t, y[0]),)
        y0 = (y0,)
        func = _TupleWrapper(func)
    flat_params = _flatten(func.parameters())
    ys = OdeintAdjointMethod.apply(*y0, func, t, flat_params, rtol, atol, method, options)
    return ys[0] if tensor_input else ys
| 5,471 | 39.835821 | 111 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_skip_step.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
# Method-name -> solver-class registry, duplicated from the sibling odeint
# modules.  NOTE(review): odeint_skip_step below delegates to the imported
# odeint() and never reads SOLVERS; only the commented-out variants at the
# bottom of this module use it — confirm before removing.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_skip_step(func, y0, actual_t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips=5, skip_proportion=0.01):
    """Integrate an ODE while randomly skipping short sub-intervals of time.

    The interval [actual_t[0], actual_t[1]] is split at ``num_skips``
    uniformly sampled interior points.  After each split point a gap of
    ``skip = (t1 - t0) * skip_proportion`` is skipped (the state is carried
    across the gap unchanged), and any remaining segment no longer than the
    gap is dropped entirely.

    Args:
        func: callable mapping (t, y) to dy/dt, forwarded to odeint.
        y0: Tensor with the initial state at actual_t[0].
        actual_t: 1-D Tensor holding [start, end]; it is cloned, not mutated.
        rtol, atol, method, options: accepted for API compatibility with
            odeint; currently not forwarded to the inner solves.
        num_skips: number of random interior split points.
        skip_proportion: fraction of the total range skipped after each split.

    Returns:
        Tensor shaped like a two-point odeint solution: element 0 is the
        state at actual_t[0], element 1 the final integrated state.
    """
    t = actual_t.clone()
    integration_time = t.type_as(y0)
    range_time = t[1] - t[0]
    skip = range_time * skip_proportion
    # Sample only the interior split points, then attach the exact endpoints.
    # (The previous code pinned index `num_skips - 1` of a size-(num_skips+2)
    # sorted array to t[1], which both lost the true end point and could leave
    # the array unsorted.)
    t0, t1 = float(t[0]), float(t[1])
    interior = np.sort(np.random.uniform(t0, t1, size=num_skips))
    rand_points = np.concatenate(([t0], interior, [t1]))
    integration_time[0] = rand_points[0]
    integration_time[1] = rand_points[1]
    out = odeint(func, y0, integration_time)
    first = out[0]
    for i in range(1, rand_points.shape[0] - 1):
        integration_time[0] = rand_points[i] + skip
        integration_time[1] = rand_points[i + 1]
        # Only integrate segments that remain longer than the skipped gap.
        if (integration_time[1] - integration_time[0]) > skip:
            out = odeint(func, out[1], integration_time)
    # Report the true initial state alongside the final integrated state.
    result = out.clone()
    result[0] = first
    return result
#def odeint_skip_step(func, y0, actual_t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# t = actual_t.clone()
# integration_time = t.type_as(y0)#integration_time.type_as(x)
# range_time = t[1]-t[0]
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=2*num_skips + 2))
# rand_points[0]=t[0]
# rand_points[2*num_skips+1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
#
# out = odeint( func, y0, integration_time)
# first = out[0]
#
# for i in range(1,rand_points.shape[0]-1):
# if i % 2 == 1:
# continue
# integration_time[0]=rand_points[i]
# integration_time[1]=rand_points[i+1]
# out = odeint( func, out[1], integration_time)
#
# result = out.clone()
# result[0] = first
# return result
#def odeint_skip_step(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# tensor_input, func, y0, t = _check_inputs(func, y0, t)
#
# if options is None:
# options = {}
# elif method is None:
# raise ValueError('cannot supply `options` without specifying `method`')
#
# if method is None:
# method = 'dopri5'
#
#
# #print("y0")
# #print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# solution = solver.integrate(t)
#
#
# integration_time = t #integration_time.type_as(x)
# range_time = t[1]-t[0]
# skip = range_time * skip_proportion
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
# rand_points[0]=t[0]
# rand_points[num_skips-1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
# print(integration_time)
# print(y0)
#
# print("=======================================")
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# solution = out
# y0 = (out[0][1],)
# for i in range(1,rand_points.shape[0]-1):
# integration_time[0]=rand_points[i] + skip
# integration_time[1]=rand_points[i+1]
#
# if integration_time[1]>integration_time[0]:
# print(integration_time)
# print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# y0 = (out[0][1],)
#
# solution[0][1] = out[0][1]
#
#
# if tensor_input:
# solution = solution[0]
#
#
# return solution
| 10,205 | 37.659091 | 132 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v2_inference.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
# Method-name -> solver-class registry, duplicated from the sibling odeint
# modules.  NOTE(review): the active code in this module delegates to the
# imported odeint() and never reads SOLVERS; only the commented-out variant
# at the bottom uses it — confirm before removing.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_v2_inference(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion = 0.5, shrink_std = 0.02, method=None, options=None, mode='train',min_length=0.01 ):
    """Piecewise ODE solve with each segment's end time deterministically
    shrunk towards its start (inference-time counterpart of the
    stochastic-end training variants).

    For each consecutive pair (actual_t[i], actual_t[i+1]) the current state
    is advanced over only the first ``shrink_proportion`` of the segment, and
    the resulting state is appended to the output.  Segments shorter than
    ``min_length`` are skipped entirely, so the output may contain fewer than
    ``actual_t.size(0)`` states.

    Args:
        func: callable mapping (t, y) to dy/dt, forwarded to odeint.
        y0: Tensor with the initial state; assumes shape
            (batch_size, dimension) — TODO confirm against callers.
        actual_t: 1-D Tensor of time points; reversed segments
            (t[i+1] < t[i]) are handled via reverse_time.
        rtol, atol, shrink_std, method, options, mode: accepted for signature
            compatibility with the sibling variants but unused in this body.
        shrink_proportion: fraction of each segment actually integrated.
        min_length: segments with |t1 - t0| below this are skipped.

    Returns:
        Tensor of shape (kept_timesteps, batch_size, dimension) whose first
        entry is y0.
    """
    num_times = actual_t.size(0)
    # NOTE(review): this clone is immediately overwritten below and never used.
    t = actual_t.clone()
    # output shape : (timesteps, batch_size, dimension)
    result = y0.unsqueeze(0)
    # Reusable 2-element [start, end] buffers, matched to y0's dtype/device.
    integration_time = torch.Tensor([0.0,1.0])
    t = torch.Tensor([0.0,1.0])
    integration_time = integration_time.type_as(y0)
    t = t.type_as(y0)
    for i in range(num_times-1):
        t[0] = actual_t[i]
        t[1] = actual_t[i+1]
        # Degenerate (too short) segments are skipped: no state is appended.
        if abs(t[1]-t[0])<min_length:
            continue
        # Work in forward time; remember the orientation to restore it below.
        rev = False
        if t[1]<t[0]:
            t = reverse_time(t)
            rev = True
        # Deterministic shrink: integrate only the first part of the segment.
        range_time = (t[1]-t[0]) * shrink_proportion
        integration_time[0]=t[0]
        integration_time[1]= t[0] + range_time
        if rev:
            integration_time = reverse_time(integration_time)
        out = odeint( func, y0, integration_time)
        # out[1] is the state at the (shrunk) end of the segment; it becomes
        # the initial state for the next segment.
        y0 = out[1].reshape_as(y0)
        y0=y0.unsqueeze(0)
        result = torch.cat((result,y0),0)
        y0=y0.squeeze(0)
    return result
def reverse_time(t):
    """Exchange t[0] and t[1] in place (flips a [start, end] time pair).

    Returns the mutated tensor so the call can be chained inline.
    """
    swapped = (t[1].item(), t[0].item())
    t[0] = swapped[0]
    t[1] = swapped[1]
    return t
#def odeint_skip_step(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# tensor_input, func, y0, t = _check_inputs(func, y0, t)
#
# if options is None:
# options = {}
# elif method is None:
# raise ValueError('cannot supply `options` without specifying `method`')
#
# if method is None:
# method = 'dopri5'
#
#
# #print("y0")
# #print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# solution = solver.integrate(t)
#
#
# integration_time = t #integration_time.type_as(x)
# range_time = t[1]-t[0]
# skip = range_time * skip_proportion
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
# rand_points[0]=t[0]
# rand_points[num_skips-1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
# print(integration_time)
# print(y0)
#
# print("=======================================")
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# solution = out
# y0 = (out[0][1],)
# for i in range(1,rand_points.shape[0]-1):
# integration_time[0]=rand_points[i] + skip
# integration_time[1]=rand_points[i+1]
#
# if integration_time[1]>integration_time[0]:
# print(integration_time)
# print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# y0 = (out[0][1],)
#
# solution[0][1] = out[0][1]
#
#
# if tensor_input:
# solution = solution[0]
#
#
# return solution
| 7,970 | 34.744395 | 185 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_v3.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
from torch.distributions import normal
from torch.distributions import uniform
# Method-name -> solver-class registry, duplicated from the sibling odeint
# modules for consistency across the stochastic-end variants.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end_v3(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, shrink_proportion = 0.5, shrink_std = 0.02 , mode='train', min_length=0.01 ):
    """Adjoint-mode ODE solve over [t[0], t[1]] with a randomly perturbed end time.

    Clones `actual_t`, samples a replacement end point from
    Uniform(t[1] - range_time, t[1] + range_time), where
    range_time = |t[1] - t[0]| - min_length, and then integrates with
    `odeint_adjoint` over the perturbed interval.  A decreasing time pair is
    flipped before sampling and flipped back afterwards.

    Args:
        func: callable mapping (t, y) to dy/dt.
        y0: Tensor or tuple of Tensors giving the initial state.
        actual_t: 1-D Tensor holding the [start, end] pair; never mutated
            (a clone is perturbed instead).
        rtol, atol: solver tolerances, forwarded to `odeint_adjoint`.
        method, options: solver selection/config, forwarded to `odeint_adjoint`.
        min_length: lower bound kept on the integration interval length.

    Returns:
        Output of `odeint_adjoint` over the perturbed interval.

    NOTE(review): `shrink_proportion`, `shrink_std` and `mode` are accepted but
    unused in this variant (compare `odeint_stochastic_end_v3`, which gates the
    final reversal on `mode == 'train'`) — confirm before relying on them.
    """
    # Work on a clone so the caller's time tensor is left untouched.
    t = actual_t.clone()
    if isinstance(y0, tuple):
        integration_time = t.type_as(y0[0])#integration_time.type_as(x)
    else:
        integration_time = t.type_as(y0)
    # NOTE(review): Tensor.type_as can return `t` itself when dtypes already
    # match; `integration_time` then aliases `t`, so the in-place swaps in
    # reverse_time touch both tensors — verify this aliasing is intended.
    rev = False
    if t[1]<t[0]:
        t = reverse_time(t)
        rev = True
    # Sampling radius around the nominal end time; min_length keeps the
    # interval from collapsing to (near) zero length.
    range_time = abs(t[1]-t[0]) - min_length
    #m = normal.Normal(t[0] + range_time - shrink_std , t[0] + range_time + shrink_std)
    #m = uniform.Uniform(t[0] + range_time - shrink_std , t[0] + range_time + shrink_std)
    m = uniform.Uniform(t[1] - range_time , t[1] + range_time)
    integration_time[0]=t[0]
    integration_time[1]= m.sample()
    # Restore the original (decreasing) orientation before solving.
    if rev:
        integration_time = reverse_time(integration_time)
    out = odeint_adjoint( func, y0, integration_time,rtol=rtol,atol=atol,method=method,options=options)
    return out
def reverse_time(t):
    """Swap the first two entries of `t` in place and return the same tensor."""
    first = t[0].item()
    second = t[1].item()
    t[0] = second
    t[1] = first
    return t
| 4,017 | 35.198198 | 184 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
# Registry mapping the method-name strings accepted by the public API to the
# solver classes that implement them.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end(func, y0, actual_t, rtol=1e-7, atol=1e-9, num_skips = 5, skip_proportion = 0.01, method=None, options=None ):
    """Integrate from t[0] up to a random interior point of [t[0], t[1]].

    A sorted batch of uniform samples is drawn on [t[0], t[1]] with both
    endpoints pinned; the solve then runs from t[0] to the *first* interior
    sample, so the effective end time of the integration is stochastic.  Only
    that first segment is integrated; the remaining sampled points are
    currently unused.

    Args:
        func: callable mapping (t, y) to dy/dt.
        y0: Tensor giving the initial state.
        actual_t: 1-D Tensor holding the [start, end] pair; it is cloned and
            never mutated.
        rtol, atol: solver tolerances, forwarded to `odeint`.
        num_skips: sizes the random grid (2 * num_skips + 2 points are drawn).
        skip_proportion: fraction of the time range used as padding past t[1]
            for the last (unused) grid point.
        method, options: solver selection/config, forwarded to `odeint`.

    Returns:
        The `odeint` solution over the randomized [t[0], end) interval.
    """
    # Operate on a copy so the caller's time tensor keeps its values.
    t = actual_t.clone()
    integration_time = t.type_as(y0)
    range_time = t[1] - t[0]
    skip = range_time * skip_proportion
    # Sorted random grid with pinned endpoints (last point padded past t[1]).
    rand_points = np.sort(np.random.uniform(t[0], t[1], size=2 * num_skips + 2))
    rand_points[0] = t[0]
    rand_points[2 * num_skips + 1] = t[1] + skip
    integration_time[0] = rand_points[0]
    integration_time[1] = rand_points[1]
    # Fix: forward the solver configuration instead of silently dropping it.
    # The defaults are identical to odeint's, so existing callers see no change.
    out = odeint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
    return out
#def odeint_skip_step(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# tensor_input, func, y0, t = _check_inputs(func, y0, t)
#
# if options is None:
# options = {}
# elif method is None:
# raise ValueError('cannot supply `options` without specifying `method`')
#
# if method is None:
# method = 'dopri5'
#
#
# #print("y0")
# #print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# solution = solver.integrate(t)
#
#
# integration_time = t #integration_time.type_as(x)
# range_time = t[1]-t[0]
# skip = range_time * skip_proportion
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
# rand_points[0]=t[0]
# rand_points[num_skips-1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
# print(integration_time)
# print(y0)
#
# print("=======================================")
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# solution = out
# y0 = (out[0][1],)
# for i in range(1,rand_points.shape[0]-1):
# integration_time[0]=rand_points[i] + skip
# integration_time[1]=rand_points[i+1]
#
# if integration_time[1]>integration_time[0]:
# print(integration_time)
# print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# y0 = (out[0][1],)
#
# solution[0][1] = out[0][1]
#
#
# if tensor_input:
# solution = solution[0]
#
#
# return solution
| 7,165 | 35.01005 | 137 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adaptive_heun.py | # Based on https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/integrate
import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveStepsizeODESolver
from .interp import _interp_fit, _interp_evaluate
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
# Butcher tableau for Heun's method (explicit trapezoidal rule): one extra
# stage at t + dt (alpha/beta), solution weights averaging the two stage
# slopes (c_sol), and error weights equal to c_sol minus the forward-Euler
# weights [1, 0], giving an embedded error estimate.
_ADAPTIVE_HEUN_TABLEAU = _ButcherTableau(
    alpha=[1.],
    beta=[
        [1.],
    ],
    c_sol=[0.5, 0.5],
    c_error=[
        0.5,
        -0.5,
    ],
)
# Stage weights used by _interp_fit_adaptive_heun to estimate the interval
# midpoint for dense output.
AH_C_MID = [
    0.5, 0.
]
def _interp_fit_adaptive_heun(y0, y1, k, dt, tableau=_ADAPTIVE_HEUN_TABLEAU):
    """Fit an interpolating polynomial to the results of a Runge-Kutta step.

    `y0`/`y1` are the states before/after the step, `k` holds the stage
    derivatives, `dt` the step size.
    NOTE(review): `tableau` is accepted but unused in this body — confirm
    before removing it.
    """
    dt = dt.type_as(y0[0])
    # Midpoint estimate: y0 plus the dt-scaled dot product of AH_C_MID with
    # the stage derivatives.
    y_mid = tuple(y0_ + _scaled_dot_product(dt, AH_C_MID, k_) for y0_, k_ in zip(y0, k))
    # First and last stage derivatives bracket the step for the fit.
    f0 = tuple(k_[0] for k_ in k)
    f1 = tuple(k_[-1] for k_ in k)
    return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _abs_square(x):
return torch.mul(x, x)
def _ta_append(list_of_tensors, value):
"""Append a value to the end of a list of PyTorch tensors."""
list_of_tensors.append(value)
return list_of_tensors
class AdaptiveHeunSolver(AdaptiveStepsizeODESolver):
    """Adaptive step-size solver built on Heun's two-stage Runge-Kutta scheme."""
    def __init__(
        self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
        **unused_kwargs
    ):
        # Warn about (and discard) keyword arguments this solver does not use.
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs
        self.func = func
        self.y0 = y0
        # Broadcast scalar tolerances so there is one entry per state tensor.
        self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
        self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
        self.first_step = first_step
        # Step-size controller knobs, stored as float64 tensors on y0's device.
        self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
        self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
        self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
        self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
    def before_integrate(self, t):
        # Evaluate the vector field at the initial time, then either take the
        # user-supplied first step or estimate one empirically.
        f0 = self.func(t[0].type_as(self.y0[0]), self.y0)
        if self.first_step is None:
            first_step = _select_initial_step(self.func, t[0], self.y0, 1, self.rtol[0], self.atol[0], f0=f0).to(t)
        else:
            first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
        self.rk_state = _RungeKuttaState(self.y0, f0, t[0], t[0], first_step, interp_coeff=[self.y0] * 5)
    def advance(self, next_t):
        """Interpolate through the next time point, integrating as necessary."""
        n_steps = 0
        while next_t > self.rk_state.t1:
            assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
            self.rk_state = self._adaptive_heun_step(self.rk_state)
            n_steps += 1
        # Evaluate the dense-output polynomial of the step containing next_t.
        return _interp_evaluate(self.rk_state.interp_coeff, self.rk_state.t0, self.rk_state.t1, next_t)
    def _adaptive_heun_step(self, rk_state):
        """Take an adaptive Runge-Kutta step to integrate the ODE."""
        y0, f0, _, t0, dt, interp_coeff = rk_state
        ########################################################
        #                      Assertions                      #
        ########################################################
        assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
        for y0_ in y0:
            assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
        y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_ADAPTIVE_HEUN_TABLEAU)
        ########################################################
        #                     Error Ratio                      #
        ########################################################
        # Accept the step only if every state tensor met its tolerance.
        mean_sq_error_ratio = _compute_error_ratio(y1_error, atol=self.atol, rtol=self.rtol, y0=y0, y1=y1)
        accept_step = (torch.tensor(mean_sq_error_ratio) <= 1).all()
        ########################################################
        #                   Update RK State                    #
        ########################################################
        # On rejection, keep the previous state and only shrink the step size.
        y_next = y1 if accept_step else y0
        f_next = f1 if accept_step else f0
        t_next = t0 + dt if accept_step else t0
        interp_coeff = _interp_fit_adaptive_heun(y0, y1, k, dt) if accept_step else interp_coeff
        # NOTE(review): order=5 is passed to the step-size controller although
        # Heun's method is 2nd order — confirm this is intentional.
        dt_next = _optimal_step_size(
            dt, mean_sq_error_ratio, safety=self.safety, ifactor=self.ifactor, dfactor=self.dfactor, order=5
        )
        rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, interp_coeff)
        return rk_state
| 4,839 | 42.214286 | 118 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v3.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
from torch.distributions import uniform
# Registry mapping the method-name strings accepted by the public API to the
# solver classes that implement them.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_v3(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion = 0.5, shrink_std = 0.02, method=None, options=None, mode='train',min_length=0.001 ):
    """ODE solve over [t[0], t[1]] whose end time is randomly perturbed.

    Clones `actual_t`, samples a replacement end point from
    Uniform(t[1] - range_time, t[1] + range_time), where
    range_time = |t[1] - t[0]| - min_length, then solves with `odeint` over
    the perturbed interval.  A decreasing time pair is flipped before
    sampling and — only when `mode == 'train'` — flipped back afterwards.
    Time grids with more (or fewer) than two points are solved unperturbed.

    Args:
        func: callable mapping (t, y) to dy/dt.
        y0: Tensor giving the initial state.
        actual_t: 1-D Tensor of time points; never mutated (a clone is used).
        rtol, atol: solver tolerances, forwarded to `odeint`.
        shrink_proportion, shrink_std: unused; kept for interface compatibility.
        method, options: solver selection/config, forwarded to `odeint`.
        mode: when 'train', a reversed input interval is re-reversed after
            the end point is sampled.
        min_length: lower bound kept on the integration interval length.

    Returns:
        The `odeint` solution over the (possibly perturbed) time grid.
    """
    t = actual_t.clone()
    if t.size(0) != 2:
        # Perturbation is only defined for a single [start, end] pair; fall
        # back to a plain solve for longer grids.
        # Fix: forward the solver configuration instead of dropping it.
        return odeint(func, y0, actual_t, rtol=rtol, atol=atol, method=method, options=options)
    # NOTE: type_as may return `t` itself when dtypes match, in which case
    # integration_time aliases t; the statement order below preserves the
    # original behavior under that aliasing.
    integration_time = t.type_as(y0)
    rev = False
    if t[1] < t[0]:
        t = reverse_time(t)
        rev = True
    # Sampling radius around the nominal end time; min_length keeps the
    # interval from collapsing to (near) zero length.
    range_time = abs(t[1] - t[0]) - min_length
    m = uniform.Uniform(t[1] - range_time, t[1] + range_time)
    integration_time[0] = t[0]
    integration_time[1] = m.sample()
    if rev and mode == 'train':
        integration_time = reverse_time(integration_time)
    # Fix: forward rtol/atol/method/options instead of silently dropping them;
    # the defaults match odeint's, so existing callers see no change.
    out = odeint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
    return out
def reverse_time(t):
    """Swap the first two entries of `t` in place and return the same tensor."""
    start, end = t[0].item(), t[1].item()
    t[0] = end
    t[1] = start
    return t
#def odeint_skip_step(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# tensor_input, func, y0, t = _check_inputs(func, y0, t)
#
# if options is None:
# options = {}
# elif method is None:
# raise ValueError('cannot supply `options` without specifying `method`')
#
# if method is None:
# method = 'dopri5'
#
#
# #print("y0")
# #print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# solution = solver.integrate(t)
#
#
# integration_time = t #integration_time.type_as(x)
# range_time = t[1]-t[0]
# skip = range_time * skip_proportion
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
# rand_points[0]=t[0]
# rand_points[num_skips-1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
# print(integration_time)
# print(y0)
#
# print("=======================================")
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# solution = out
# y0 = (out[0][1],)
# for i in range(1,rand_points.shape[0]-1):
# integration_time[0]=rand_points[i] + skip
# integration_time[1]=rand_points[i+1]
#
# if integration_time[1]>integration_time[0]:
# print(integration_time)
# print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# y0 = (out[0][1],)
#
# solution[0][1] = out[0][1]
#
#
# if tensor_input:
# solution = solution[0]
#
#
# return solution
| 7,805 | 35.647887 | 176 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/bosh3.py | import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveStepsizeODESolver
from .interp import _interp_fit, _interp_evaluate
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
# Butcher tableau of the Bogacki-Shampine 3(2) embedded Runge-Kutta pair:
# stage times (alpha), stage coefficients (beta), 3rd-order solution weights
# (c_sol), and error weights (c_error) given by the difference between the
# 3rd- and 2nd-order solution weights.
_BOGACKI_SHAMPINE_TABLEAU = _ButcherTableau(
    alpha=[1/2, 3/4, 1.],
    beta=[
        [1/2],
        [0., 3/4],
        [2/9, 1/3, 4/9]
    ],
    c_sol=[2/9, 1/3, 4/9, 0.],
    c_error=[2/9-7/24, 1/3-1/4, 4/9-1/3, -1/8],
)
# Stage weights used by _interp_fit_bosh3 to estimate the interval midpoint
# for dense output.
BS_C_MID = [ 0., 0.5, 0., 0. ]
def _interp_fit_bosh3(y0, y1, k, dt):
    """Fit an interpolating polynomial to the results of a Runge-Kutta step.

    `y0`/`y1` are the states before/after the step, `k` holds the stage
    derivatives, `dt` the step size.
    """
    dt = dt.type_as(y0[0])
    # Midpoint estimate: y0 plus the dt-scaled dot product of BS_C_MID with
    # the stage derivatives.
    y_mid = tuple(y0_ + _scaled_dot_product(dt, BS_C_MID, k_) for y0_, k_ in zip(y0, k))
    # First and last stage derivatives bracket the step for the fit.
    f0 = tuple(k_[0] for k_ in k)
    f1 = tuple(k_[-1] for k_ in k)
    return _interp_fit(y0, y1, y_mid, f0, f1, dt)
class Bosh3Solver(AdaptiveStepsizeODESolver):
    """Adaptive solver using the Bogacki-Shampine 3(2) Runge-Kutta pair."""
    def __init__(
        self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
        **unused_kwargs
    ):
        # Warn about (and discard) keyword arguments this solver does not use.
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs
        self.func = func
        self.y0 = y0
        # Broadcast scalar tolerances so there is one entry per state tensor.
        self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
        self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
        self.first_step = first_step
        # Step-size controller knobs, stored as float64 tensors on y0's device.
        self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
        self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
        self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
        self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
    def before_integrate(self, t):
        # Evaluate the vector field at the initial time, then either take the
        # user-supplied first step or estimate one empirically.
        f0 = self.func(t[0].type_as(self.y0[0]), self.y0)
        if self.first_step is None:
            first_step = _select_initial_step(self.func, t[0], self.y0, 2, self.rtol[0], self.atol[0], f0=f0).to(t)
        else:
            first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
        self.rk_state = _RungeKuttaState(self.y0, f0, t[0], t[0], first_step, interp_coeff=[self.y0] * 5)
    def advance(self, next_t):
        """Interpolate through the next time point, integrating as necessary."""
        n_steps = 0
        while next_t > self.rk_state.t1:
            assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
            self.rk_state = self._adaptive_bosh3_step(self.rk_state)
            n_steps += 1
        # Evaluate the dense-output polynomial of the step containing next_t.
        return _interp_evaluate(self.rk_state.interp_coeff, self.rk_state.t0, self.rk_state.t1, next_t)
    def _adaptive_bosh3_step(self, rk_state):
        """Take an adaptive Runge-Kutta step to integrate the ODE."""
        y0, f0, _, t0, dt, interp_coeff = rk_state
        ########################################################
        #                      Assertions                      #
        ########################################################
        assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
        for y0_ in y0:
            assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
        y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_BOGACKI_SHAMPINE_TABLEAU)
        ########################################################
        #                     Error Ratio                      #
        ########################################################
        # Accept the step only if every state tensor met its tolerance.
        mean_sq_error_ratio = _compute_error_ratio(y1_error, atol=self.atol, rtol=self.rtol, y0=y0, y1=y1)
        accept_step = (torch.tensor(mean_sq_error_ratio) <= 1).all()
        ########################################################
        #                   Update RK State                    #
        ########################################################
        # On rejection, keep the previous state and only shrink the step size.
        y_next = y1 if accept_step else y0
        f_next = f1 if accept_step else f0
        t_next = t0 + dt if accept_step else t0
        interp_coeff = _interp_fit_bosh3(y0, y1, k, dt) if accept_step else interp_coeff
        # order=3 matches the method's 3rd-order accuracy.
        dt_next = _optimal_step_size(
            dt, mean_sq_error_ratio, safety=self.safety, ifactor=self.ifactor, dfactor=self.dfactor, order=3
        )
        rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, interp_coeff)
        return rk_state
| 4,552 | 44.989899 | 118 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/misc.py | import warnings
import torch
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def _flatten_convert_none_to_zeros(sequence, like_sequence):
flat = [
p.contiguous().view(-1) if p is not None else torch.zeros_like(q).view(-1)
for p, q in zip(sequence, like_sequence)
]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def _possibly_nonzero(x):
return isinstance(x, torch.Tensor) or x != 0
def _scaled_dot_product(scale, xs, ys):
    """Calculate a scaled, vector inner product between lists of Tensors."""
    # Terms where both operands are the plain scalar 0 are skipped to avoid
    # wasted computation (a Tensor operand always counts as possibly nonzero).
    total = 0
    for x, y in zip(xs, ys):
        if isinstance(x, torch.Tensor) or x != 0 or isinstance(y, torch.Tensor) or y != 0:
            total = total + (scale * x) * y
    return total
def _dot_product(xs, ys):
"""Calculate the vector inner product between two lists of Tensors."""
return sum([x * y for x, y in zip(xs, ys)])
def _has_converged(y0, y1, rtol, atol):
"""Checks that each element is within the error tolerance."""
error_tol = tuple(atol + rtol * torch.max(torch.abs(y0_), torch.abs(y1_)) for y0_, y1_ in zip(y0, y1))
error = tuple(torch.abs(y0_ - y1_) for y0_, y1_ in zip(y0, y1))
return all((error_ < error_tol_).all() for error_, error_tol_ in zip(error, error_tol))
def _convert_to_tensor(a, dtype=None, device=None):
if not isinstance(a, torch.Tensor):
a = torch.tensor(a)
if dtype is not None:
a = a.type(dtype)
if device is not None:
a = a.to(device)
return a
def _is_finite(tensor):
_check = (tensor == float('inf')) + (tensor == float('-inf')) + torch.isnan(tensor)
return not _check.any()
def _decreasing(t):
return (t[1:] < t[:-1]).all()
def _assert_increasing(t):
assert (t[1:] > t[:-1]).all(), 't must be strictly increasing or decreasing'
def _is_iterable(inputs):
try:
iter(inputs)
return True
except TypeError:
return False
def _norm(x):
"""Compute RMS norm."""
if torch.is_tensor(x):
return x.norm() / (x.numel()**0.5)
else:
return torch.sqrt(sum(x_.norm()**2 for x_ in x) / sum(x_.numel() for x_ in x))
def _handle_unused_kwargs(solver, unused_kwargs):
if len(unused_kwargs) > 0:
warnings.warn('{}: Unexpected arguments {}'.format(solver.__class__.__name__, unused_kwargs))
def _select_initial_step(fun, t0, y0, order, rtol, atol, f0=None):
    """Empirically select a good initial step.
    The algorithm is described in [1]_.
    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t0 : Tensor (scalar)
        Initial value of the independent variable.
    y0 : tuple of Tensor
        Initial value of the dependent variable.
    order : float
        Method order.
    rtol : float
        Desired relative tolerance.
    atol : float
        Desired absolute tolerance.
    f0 : tuple of Tensor, optional
        Derivative at (t0, y0); computed here when not supplied.
    Returns
    -------
    h_abs : Tensor (scalar)
        Absolute value of the suggested initial step.
    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    t0 = t0.to(y0[0])
    if f0 is None:
        f0 = fun(t0, y0)
    # Broadcast scalar tolerances to one value per state tensor.
    rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
    atol = atol if _is_iterable(atol) else [atol] * len(y0)
    # Tolerance-scaled norms of the state (d0) and its derivative (d1).
    scale = tuple(atol_ + torch.abs(y0_) * rtol_ for y0_, atol_, rtol_ in zip(y0, atol, rtol))
    d0 = tuple(_norm(y0_ / scale_) for y0_, scale_ in zip(y0, scale))
    d1 = tuple(_norm(f0_ / scale_) for f0_, scale_ in zip(f0, scale))
    if max(d0).item() < 1e-5 or max(d1).item() < 1e-5:
        h0 = torch.tensor(1e-6).to(t0)
    else:
        h0 = 0.01 * max(d0_ / d1_ for d0_, d1_ in zip(d0, d1))
    # One explicit Euler step of size h0 to estimate the second derivative (d2).
    y1 = tuple(y0_ + h0 * f0_ for y0_, f0_ in zip(y0, f0))
    f1 = fun(t0 + h0, y1)
    d2 = tuple(_norm((f1_ - f0_) / scale_) / h0 for f1_, f0_, scale_ in zip(f1, f0, scale))
    if max(d1).item() <= 1e-15 and max(d2).item() <= 1e-15:
        h1 = torch.max(torch.tensor(1e-6).to(h0), h0 * 1e-3)
    else:
        h1 = (0.01 / max(d1 + d2))**(1. / float(order + 1))
    return torch.min(100 * h0, h1)
def _compute_error_ratio(error_estimate, error_tol=None, rtol=None, atol=None, y0=None, y1=None):
if error_tol is None:
assert rtol is not None and atol is not None and y0 is not None and y1 is not None
rtol if _is_iterable(rtol) else [rtol] * len(y0)
atol if _is_iterable(atol) else [atol] * len(y0)
error_tol = tuple(
atol_ + rtol_ * torch.max(torch.abs(y0_), torch.abs(y1_))
for atol_, rtol_, y0_, y1_ in zip(atol, rtol, y0, y1)
)
error_ratio = tuple(error_estimate_ / error_tol_ for error_estimate_, error_tol_ in zip(error_estimate, error_tol))
mean_sq_error_ratio = tuple(torch.mean(error_ratio_ * error_ratio_) for error_ratio_ in error_ratio)
return mean_sq_error_ratio
def _optimal_step_size(last_step, mean_error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5):
    """Calculate the optimal size for the next step.

    `mean_error_ratio` is an iterable of per-tensor mean squared error ratios
    (<= 1 means the step met tolerance); `ifactor`/`dfactor` bound how much
    the step may grow/shrink, `safety` biases the estimate conservatively.
    """
    mean_error_ratio = max(mean_error_ratio)  # Compute step size based on highest ratio.
    if mean_error_ratio == 0:
        # No error at all: grow the step by the maximum allowed factor.
        return last_step * ifactor
    if mean_error_ratio < 1:
        # Step was accepted: never shrink, so clamp the decrease factor to 1.
        dfactor = _convert_to_tensor(1, dtype=torch.float64, device=mean_error_ratio.device)
    error_ratio = torch.sqrt(mean_error_ratio).to(last_step)
    exponent = torch.tensor(1 / order).to(last_step)
    # Standard controller: divide by error_ratio^(1/order)/safety, clamped to
    # the [1/ifactor, 1/dfactor] growth/shrink bounds.
    factor = torch.max(1 / ifactor, torch.min(error_ratio**exponent / safety, 1 / dfactor))
    return last_step / factor
def _check_inputs(func, y0, t):
    """Normalize solver inputs.

    Wraps a bare Tensor state into a 1-tuple (adapting `func` to match), and
    folds a strictly decreasing time grid into an equivalent increasing one by
    negating time and the vector field.

    Returns:
        (tensor_input, func, y0, t) where `tensor_input` records whether the
        caller passed a bare Tensor, so the solution can be unwrapped again.

    Raises:
        TypeError: if `y0` elements or `t` are not floating point Tensors.
    """
    tensor_input = False
    if torch.is_tensor(y0):
        tensor_input = True
        y0 = (y0,)
        # Adapt a single-tensor func to the internal tuple-of-tensors calling
        # convention.
        _base_nontuple_func_ = func
        func = lambda t, y: (_base_nontuple_func_(t, y[0]),)
    assert isinstance(y0, tuple), 'y0 must be either a torch.Tensor or a tuple'
    for y0_ in y0:
        assert torch.is_tensor(y0_), 'each element must be a torch.Tensor but received {}'.format(type(y0_))
    if _decreasing(t):
        # Solve backwards-in-time problems on an increasing grid by negating
        # both time and the right-hand side.
        t = -t
        _base_reverse_func = func
        func = lambda t, y: tuple(-f_ for f_ in _base_reverse_func(-t, y))
    for y0_ in y0:
        if not torch.is_floating_point(y0_):
            raise TypeError('`y0` must be a floating point Tensor but is a {}'.format(y0_.type()))
    if not torch.is_floating_point(t):
        raise TypeError('`t` must be a floating point Tensor but is a {}'.format(t.type()))
    return tensor_input, func, y0, t
| 6,621 | 32.785714 | 119 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
# Registry mapping the method-name strings accepted by the public API to the
# solver classes that implement them.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, num_skips = 10, skip_proportion = 0.80 ):
    """Adjoint ODE integration over a randomly perturbed end time.

    Draws `num_skips + 2` uniform samples in [t[0], t[1]], sorts them, pins the
    first sample to t[0], then performs a single `odeint_adjoint` call from
    t[0] to (second sample + skip), where
    `skip = (t[1] - t[0]) * skip_proportion`.  The remaining samples are unused.

    NOTE(review): with the default skip_proportion of 0.80 the end point
    `rand_points[1] + skip` can exceed t[1] — presumably deliberate terminal-time
    randomization; confirm against the training code.  The returned values are
    the solver outputs at the two times actually integrated, not at `actual_t`.

    Args:
        func: maps a scalar time Tensor and state `y` to dy/dt.
        y0: Tensor or tuple of Tensors giving the state at `actual_t[0]`.
        actual_t: 1-D Tensor of two time points `[t0, t1]`; it is cloned and
            never mutated.
        rtol, atol: error tolerances forwarded to the solver.
        method: optional solver name.
        options: optional solver configuration dict.
        num_skips, skip_proportion: control the random end-time sampling above.

    Returns:
        Output of `odeint_adjoint` over the sampled two-point time interval.
    """
    t = actual_t.clone()
    # Build the time tensor on the same dtype/device as the state.
    if isinstance(y0, tuple):
        integration_time = t.type_as(y0[0])
    else:
        integration_time = t.type_as(y0)
    range_time = t[1]-t[0]
    skip = range_time * skip_proportion
    # Sample candidate breakpoints directly on the state's dtype/device.
    rand_points= torch.Tensor(num_skips+2).type_as(integration_time)
    rand_points.uniform_(t[0],t[1])
    rand_points , _ = rand_points.sort()
    # Pin the first breakpoint to the interval start.
    rand_points[0]=t[0]
    # Single segment: [t0, second sample + skip].
    integration_time[0]=rand_points[0]
    integration_time[1]=rand_points[1] + skip
    out = odeint_adjoint( func, y0, integration_time,rtol=rtol,atol=atol,method=method,options=options)
    return out
| 3,574 | 38.722222 | 146 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/interp.py | import torch
from .misc import _convert_to_tensor, _dot_product
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
    """Fit coefficients for 4th order polynomial interpolation.

    Args:
        y0: function value at the start of the interval.
        y1: function value at the end of the interval.
        y_mid: function value at the mid-point of the interval.
        f0: derivative value at the start of the interval.
        f1: derivative value at the end of the interval.
        dt: width of the interval.

    Returns:
        List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
        `p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
        between 0 (start of interval) and 1 (end of interval).
    """
    # Fitted per state component; the fixed integer/dt weights presumably
    # encode the five matching conditions p(0)=y0, p(1)=y1, p(1/2)=y_mid,
    # p'(0)=dt*f0, p'(1)=dt*f1 in the normalized coordinate — consistent with
    # how _interp_evaluate consumes [a, b, c, d, e].
    a = tuple(
        _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0_, f1_, y0_, y1_, y_mid_])
        for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
    )
    b = tuple(
        _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0_, f1_, y0_, y1_, y_mid_])
        for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
    )
    c = tuple(
        _dot_product([-4 * dt, dt, -11, -5, 16], [f0_, f1_, y0_, y1_, y_mid_])
        for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
    )
    # Linear and constant terms come directly from the start derivative/value.
    d = tuple(dt * f0_ for f0_ in f0)
    e = y0
    return [a, b, c, d, e]
def _interp_evaluate(coefficients, t0, t1, t):
    """Evaluate polynomial interpolation at the given time point.

    Args:
        coefficients: list of Tensor coefficients as created by `interp_fit`.
        t0: scalar float64 Tensor giving the start of the interval.
        t1: scalar float64 Tensor giving the end of the interval.
        t: scalar float64 Tensor giving the desired interpolation point.

    Returns:
        Polynomial interpolation of the coefficients at time `t`.
    """
    dtype = coefficients[0][0].dtype
    device = coefficients[0][0].device

    t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
    t1 = _convert_to_tensor(t1, dtype=dtype, device=device)
    t = _convert_to_tensor(t, dtype=dtype, device=device)

    assert (t0 <= t) & (t <= t1), 'invalid interpolation, fails `t0 <= t <= t1`: {}, {}, {}'.format(t0, t, t1)

    # Normalized coordinate in [0, 1] over the interval.
    x = ((t - t0) / (t1 - t0)).type(dtype).to(device)

    # xs = [1, x, x^2, ...] up to the polynomial degree; reversed below so it
    # pairs [a, b, c, d, e] with [x^4, x^3, x^2, x, 1].
    xs = [torch.tensor(1).type(dtype).to(device), x]
    for _ in range(2, len(coefficients)):
        xs.append(xs[-1] * x)

    return tuple(_dot_product(coefficients_, reversed(xs)) for coefficients_ in zip(*coefficients))
| 2,501 | 36.909091 | 110 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/tsit5.py | import torch
from .misc import _scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs
from .solvers import AdaptiveStepsizeODESolver
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
# Parameters from Tsitouras (2011).
# alpha: stage times c_i; beta: lower-triangular stage weights a_ij;
# c_sol: solution weights b_i; c_error: b_i minus the embedded lower-order
# weights, giving the per-stage weights of the error estimate.
_TSITOURAS_TABLEAU = _ButcherTableau(
    alpha=[0.161, 0.327, 0.9, 0.9800255409045097, 1., 1.],
    beta=[
        [0.161],
        [-0.008480655492357, 0.3354806554923570],
        [2.897153057105494, -6.359448489975075, 4.362295432869581],
        [5.32586482843925895, -11.74888356406283, 7.495539342889836, -0.09249506636175525],
        [5.86145544294642038, -12.92096931784711, 8.159367898576159, -0.071584973281401006, -0.02826905039406838],
        [0.09646076681806523, 0.01, 0.4798896504144996, 1.379008574103742, -3.290069515436081, 2.324710524099774],
    ],
    c_sol=[0.09646076681806523, 0.01, 0.4798896504144996, 1.379008574103742, -3.290069515436081, 2.324710524099774, 0],
    c_error=[
        0.09646076681806523 - 0.001780011052226,
        0.01 - 0.000816434459657,
        0.4798896504144996 - -0.007880878010262,
        1.379008574103742 - 0.144711007173263,
        -3.290069515436081 - -0.582357165452555,
        2.324710524099774 - 0.458082105929187,
        -1 / 66,
    ],
)
def _interp_coeff_tsit5(t0, dt, eval_t):
t = float((eval_t - t0) / dt)
b1 = -1.0530884977290216 * t * (t - 1.3299890189751412) * (t**2 - 1.4364028541716351 * t + 0.7139816917074209)
b2 = 0.1017 * t**2 * (t**2 - 2.1966568338249754 * t + 1.2949852507374631)
b3 = 2.490627285651252793 * t**2 * (t**2 - 2.38535645472061657 * t + 1.57803468208092486)
b4 = -16.54810288924490272 * (t - 1.21712927295533244) * (t - 0.61620406037800089) * t**2
b5 = 47.37952196281928122 * (t - 1.203071208372362603) * (t - 0.658047292653547382) * t**2
b6 = -34.87065786149660974 * (t - 1.2) * (t - 0.666666666666666667) * t**2
b7 = 2.5 * (t - 1) * (t - 0.6) * t**2
return [b1, b2, b3, b4, b5, b6, b7]
def _interp_eval_tsit5(t0, t1, k, eval_t):
    """Evaluate Tsit5 dense output at `eval_t` within the step [t0, t1].

    `k` is a tuple (one entry per state tensor) of 7-element stage lists
    saved from the last accepted step.
    """
    dt = t1 - t0
    # NOTE(review): after an accepted RK step, k_[0] is the first stage
    # *derivative* f0 (see _runge_kutta_step), yet it is used here as the
    # step's starting state y0 — it is only literally y0 for the placeholder
    # coefficients built in Tsit5Solver.before_integrate.  Confirm intended.
    y0 = tuple(k_[0] for k_ in k)
    interp_coeff = _interp_coeff_tsit5(t0, dt, eval_t)
    # y(eval_t) = y0 + dt * sum_i b_i(x) * k_i
    y_t = tuple(y0_ + _scaled_dot_product(dt, interp_coeff, k_) for y0_, k_ in zip(y0, k))
    return y_t
def _optimal_step_size(last_step, mean_error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5):
    """Calculate the optimal size for the next Runge-Kutta step.

    `mean_error_ratio` is the (mean squared) ratio of local error to
    tolerance; values <= 1 correspond to an accepted step.
    """
    if mean_error_ratio == 0:
        # No measurable error: grow as fast as the controller allows.
        return last_step * ifactor
    if mean_error_ratio < 1:
        # Accepted step: never shrink, so cap the decrease factor at 1.
        dfactor = _convert_to_tensor(1, dtype=torch.float64, device=mean_error_ratio.device)
    error_ratio = torch.sqrt(mean_error_ratio).type_as(last_step)
    exponent = torch.tensor(1 / order).type_as(last_step)
    # Standard controller: divide by err^(1/order)/safety, clamped to
    # [1/ifactor, 1/dfactor].
    factor = torch.max(1 / ifactor, torch.min(error_ratio**exponent / safety, 1 / dfactor))
    return last_step / factor
def _abs_square(x):
return torch.mul(x, x)
class Tsit5Solver(AdaptiveStepsizeODESolver):
    """Adaptive-step solver based on the Tsitouras 5(4) Runge-Kutta pair.

    Step size is controlled by the embedded error estimate of
    `_TSITOURAS_TABLEAU`; dense output between accepted steps uses the Tsit5
    interpolant (`_interp_eval_tsit5`).
    """

    def __init__(
        self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
        **unused_kwargs
    ):
        # func: dy/dt callable; y0: tuple of state tensors; rtol/atol: tolerances.
        # safety/ifactor/dfactor tune the step-size controller; max_num_steps
        # bounds the number of internal steps per requested output time.
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs
        self.func = func
        self.y0 = y0
        self.rtol = rtol
        self.atol = atol
        self.first_step = first_step
        self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
        self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
        self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
        self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)

    def before_integrate(self, t):
        # Pick an initial step (order-4 heuristic unless user-supplied) and
        # seed the RK state at t[0] with a degenerate interval [t0, t0] so the
        # first advance() always integrates.
        if self.first_step is None:
            first_step = _select_initial_step(self.func, t[0], self.y0, 4, self.rtol, self.atol).to(t)
        else:
            first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
        self.rk_state = _RungeKuttaState(
            self.y0,
            self.func(t[0].type_as(self.y0[0]), self.y0), t[0], t[0], first_step,
            tuple(map(lambda x: [x] * 7, self.y0))  # placeholder 7-stage interp coefficients
        )

    def advance(self, next_t):
        """Interpolate through the next time point, integrating as necessary."""
        n_steps = 0
        while next_t > self.rk_state.t1:
            assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
            self.rk_state = self._adaptive_tsit5_step(self.rk_state)
            n_steps += 1
        return _interp_eval_tsit5(self.rk_state.t0, self.rk_state.t1, self.rk_state.interp_coeff, next_t)

    def _adaptive_tsit5_step(self, rk_state):
        """Take an adaptive Runge-Kutta step to integrate the ODE."""
        y0, f0, _, t0, dt, _ = rk_state
        ########################################################
        #                     Assertions                       #
        ########################################################
        assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
        for y0_ in y0:
            assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
        y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_TSITOURAS_TABLEAU)
        ########################################################
        #                     Error Ratio                      #
        ########################################################
        # Mean squared ratio of local error to tolerance; <= 1 accepts the step.
        error_tol = tuple(self.atol + self.rtol * torch.max(torch.abs(y0_), torch.abs(y1_)) for y0_, y1_ in zip(y0, y1))
        tensor_error_ratio = tuple(y1_error_ / error_tol_ for y1_error_, error_tol_ in zip(y1_error, error_tol))
        sq_error_ratio = tuple(
            torch.mul(tensor_error_ratio_, tensor_error_ratio_) for tensor_error_ratio_ in tensor_error_ratio
        )
        mean_error_ratio = (
            sum(torch.sum(sq_error_ratio_) for sq_error_ratio_ in sq_error_ratio) /
            sum(sq_error_ratio_.numel() for sq_error_ratio_ in sq_error_ratio)
        )
        accept_step = mean_error_ratio <= 1
        ########################################################
        #                   Update RK State                    #
        ########################################################
        # On rejection keep (y, f, t) and only adjust dt for the retry.
        y_next = y1 if accept_step else y0
        f_next = f1 if accept_step else f0
        t_next = t0 + dt if accept_step else t0
        dt_next = _optimal_step_size(dt, mean_error_ratio, self.safety, self.ifactor, self.dfactor)
        k_next = k if accept_step else self.rk_state.interp_coeff
        rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, k_next)
        return rk_state
| 6,777 | 47.414286 | 120 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_skip_step.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
# Name -> solver-class registry, kept in parity with the other odeint_* modules.
# NOTE(review): not referenced in this module — the wrapper below delegates to
# `odeint_adjoint`, which resolves the method name itself.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_skip_step(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, num_skips=5, skip_proportion=0.01):
    """Adjoint ODE integration over [t0, t1] with `num_skips` random gaps.

    The interval [t[0], t[1]] is split at `num_skips` uniformly sampled
    interior breakpoints.  Integration proceeds segment by segment, restarting
    each segment a distance `skip = (t[1] - t[0]) * skip_proportion` after its
    left breakpoint, so a small fraction of the trajectory is carried over
    unchanged instead of being integrated.

    Args:
        func: maps a scalar time Tensor `t` and state `y` to dy/dt.
        y0: Tensor giving the starting state at `actual_t[0]`.
            NOTE(review): unlike the other odeint_* variants, this function
            indexes and clones the solver output directly, so tuple states
            are not supported here.
        actual_t: 1-D Tensor of two time points `[t0, t1]` with t1 > t0;
            cloned, never mutated.
        rtol, atol: error tolerances forwarded to every solver call.
        method: optional solver name.
        options: optional solver configuration dict.
        num_skips: number of interior breakpoints to sample.
        skip_proportion: fraction of the full interval skipped after each
            breakpoint.

    Returns:
        Tensor shaped like a two-time-point `odeint_adjoint` output: the state
        at t0 followed by the state reached at t1.
    """
    t = actual_t.clone()
    integration_time = t.type_as(y0)
    range_time = t[1] - t[0]
    skip = range_time * skip_proportion

    # Sample num_skips interior breakpoints and pin both endpoints so the
    # sorted breakpoint array spans exactly [t0, t1].  Pinning the *last*
    # entry (index num_skips + 1) keeps the array sorted; the previous code
    # wrote t1 into index num_skips - 1, which left the array unsorted and
    # the tail of the interval governed by arbitrary random points.
    rand_points = np.sort(np.random.uniform(t[0], t[1], size=num_skips + 2))
    rand_points[0] = t[0]
    rand_points[-1] = t[1]

    # First segment: from t0 to the first interior breakpoint.
    integration_time[0] = rand_points[0]
    integration_time[1] = rand_points[1]
    out = odeint_adjoint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
    first = out[0]

    # Remaining segments: restart `skip` after each breakpoint, carrying the
    # previous endpoint state across the gap unchanged.  Segments shorter
    # than `skip` are dropped entirely.
    for i in range(1, rand_points.shape[0] - 1):
        integration_time[0] = rand_points[i] + skip
        integration_time[1] = rand_points[i + 1]
        if (integration_time[1] - integration_time[0]) > skip:
            # Forward the solver settings on every segment (the original
            # inner call silently fell back to the defaults).
            out = odeint_adjoint(func, out[1], integration_time, rtol=rtol, atol=atol, method=method, options=options)

    result = out.clone()
    result[0] = first
    return result
| 3,667 | 38.021277 | 140 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adams.py | import collections
import torch
from .solvers import AdaptiveStepsizeODESolver
from .misc import (
_handle_unused_kwargs, _select_initial_step, _convert_to_tensor, _scaled_dot_product, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
# Bounds on the VCABM method order.
_MIN_ORDER = 1
_MAX_ORDER = 12

# gamma-star coefficients (Hairer et al., Sec. III.5, presumably); used in
# _adaptive_adams_step to weight the divided difference in the order-increase
# error estimate.
gamma_star = [
    1, -1 / 2, -1 / 12, -1 / 24, -19 / 720, -3 / 160, -863 / 60480, -275 / 24192, -33953 / 3628800, -0.00789255,
    -0.00678585, -0.00592406, -0.00523669, -0.0046775, -0.00421495, -0.0038269
]
class _VCABMState(collections.namedtuple('_VCABMState', 'y_n, prev_f, prev_t, next_t, phi, order')):
    """Saved state of the variable step size Adams-Bashforth-Moulton solver as described in
    Solving Ordinary Differential Equations I - Nonstiff Problems III.5
    by Ernst Hairer, Gerhard Wanner, and Syvert P Norsett.

    Attributes:
        y_n: current solution (tuple of tensors).
        prev_f: deque of past derivative evaluations, most recent first.
        prev_t: deque of past step times, most recent first.
        next_t: proposed time for the next step.
        phi: deque of divided differences of past derivatives.
        order: current method order.
    """
def g_and_explicit_phi(prev_t, next_t, implicit_phi, k):
    """Integration coefficients ``g`` and explicit divided differences for VCABM.

    `prev_t` holds past step times (most recent first) and `implicit_phi` the
    implicit divided differences from the previous step.  Returns a tensor
    ``g`` of length ``k + 1`` and a deque of ``k`` explicit phi tuples used by
    the Adams-Bashforth predictor.
    """
    t_now = prev_t[0]
    dt = next_t - prev_t[0]

    g = torch.empty(k + 1).to(prev_t[0])
    explicit_phi = collections.deque(maxlen=k)
    beta = torch.tensor(1).to(prev_t[0])

    g[0] = 1
    # c starts as [1, 1/2, ..., 1/(k+1)] and is repeatedly differenced;
    # g[j] picks up the leading entry after each pass.
    c = 1 / torch.arange(1, k + 2).to(prev_t[0])
    explicit_phi.append(implicit_phi[0])

    for j in range(1, k):
        beta = beta * ((next_t - prev_t[j - 1]) / (t_now - prev_t[j]))
        beta_cast = beta.to(implicit_phi[j][0])
        explicit_phi.append(tuple(phi_ * beta_cast for phi_ in implicit_phi[j]))

        if j == 1:
            c = c[:-1] - c[1:]
        else:
            c = c[:-1] - c[1:] * dt / (next_t - prev_t[j - 1])
        g[j] = c[0]

    c = c[:-1] - c[1:] * dt / (next_t - prev_t[k - 1])
    g[k] = c[0]

    return g, explicit_phi
def compute_implicit_phi(explicit_phi, f_n, k):
    """Implicit divided differences built from the newest derivative `f_n`.

    phi_0 is `f_n` itself and phi_j = phi_{j-1} - phi*_{j-1}; at most
    ``len(explicit_phi) + 1`` entries (capped at ``k``) are produced.
    """
    depth = min(len(explicit_phi) + 1, k)
    implicit_phi = collections.deque(maxlen=depth)
    implicit_phi.append(f_n)
    for j in range(1, depth):
        prev = implicit_phi[j - 1]
        star = explicit_phi[j - 1]
        implicit_phi.append(tuple(p - s for p, s in zip(prev, star)))
    return implicit_phi
class VariableCoefficientAdamsBashforth(AdaptiveStepsizeODESolver):
    """Variable-order, variable-step Adams-Bashforth-Moulton (VCABM) solver.

    Explicit Adams-Bashforth prediction followed by an implicit Adams-Moulton
    correction, with the order adapted between `_MIN_ORDER` and `max_order`
    from local error estimates (Hairer, Norsett & Wanner, Sec. III.5).
    """

    def __init__(
        self, func, y0, rtol, atol, implicit=True, first_step=None, max_order=_MAX_ORDER, safety=0.9, ifactor=10.0, dfactor=0.2,
        **unused_kwargs
    ):
        # func: dy/dt callable; y0: tuple of state tensors.
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.func = func
        self.y0 = y0
        # Broadcast scalar tolerances to one value per state component.
        self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
        self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
        self.implicit = implicit
        self.first_step = first_step
        self.max_order = int(max(_MIN_ORDER, min(max_order, _MAX_ORDER)))
        self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
        self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
        self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)

    def before_integrate(self, t):
        # Seed the history deques with the initial derivative and pick the
        # first step size.
        prev_f = collections.deque(maxlen=self.max_order + 1)
        prev_t = collections.deque(maxlen=self.max_order + 1)
        phi = collections.deque(maxlen=self.max_order)

        t0 = t[0]
        f0 = self.func(t0.type_as(self.y0[0]), self.y0)
        prev_t.appendleft(t0)
        prev_f.appendleft(f0)
        phi.appendleft(f0)

        if self.first_step is None:
            first_step = _select_initial_step(self.func, t[0], self.y0, 2, self.rtol[0], self.atol[0], f0=f0).to(t)
        else:
            # Fix: honor a user-supplied first step (the two branches were
            # previously identical, so `first_step` was silently ignored);
            # mirrors Tsit5Solver.before_integrate.
            first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
        self.vcabm_state = _VCABMState(self.y0, prev_f, prev_t, next_t=t[0] + first_step, phi=phi, order=1)

    def advance(self, final_t):
        """Step the solver until `final_t` lands exactly on a step endpoint."""
        final_t = _convert_to_tensor(final_t).to(self.vcabm_state.prev_t[0])
        while final_t > self.vcabm_state.prev_t[0]:
            # _adaptive_adams_step clamps next_t at final_t, so the loop exits
            # with prev_t[0] == final_t.
            self.vcabm_state = self._adaptive_adams_step(self.vcabm_state, final_t)
        assert final_t == self.vcabm_state.prev_t[0]
        return self.vcabm_state.y_n

    def _adaptive_adams_step(self, vcabm_state, final_t):
        """One predictor-corrector step with error control and order adaptation."""
        y0, prev_f, prev_t, next_t, prev_phi, order = vcabm_state
        if next_t > final_t:
            next_t = final_t
        dt = (next_t - prev_t[0])
        dt_cast = dt.to(y0[0])

        # Explicit predictor step.
        g, phi = g_and_explicit_phi(prev_t, next_t, prev_phi, order)
        g = g.to(y0[0])
        p_next = tuple(
            y0_ + _scaled_dot_product(dt_cast, g[:max(1, order - 1)], phi_[:max(1, order - 1)])
            for y0_, phi_ in zip(y0, tuple(zip(*phi)))
        )

        # Update phi to implicit.
        next_f0 = self.func(next_t.to(p_next[0]), p_next)
        implicit_phi_p = compute_implicit_phi(phi, next_f0, order + 1)

        # Implicit corrector step.
        y_next = tuple(
            p_next_ + dt_cast * g[order - 1] * iphi_ for p_next_, iphi_ in zip(p_next, implicit_phi_p[order - 1])
        )

        # Error estimation.
        tolerance = tuple(
            atol_ + rtol_ * torch.max(torch.abs(y0_), torch.abs(y1_))
            for atol_, rtol_, y0_, y1_ in zip(self.atol, self.rtol, y0, y_next)
        )
        local_error = tuple(dt_cast * (g[order] - g[order - 1]) * iphi_ for iphi_ in implicit_phi_p[order])
        error_k = _compute_error_ratio(local_error, tolerance)
        accept_step = (torch.tensor(error_k) <= 1).all()

        if not accept_step:
            # Retry with adjusted step size if step is rejected.
            dt_next = _optimal_step_size(dt, error_k, self.safety, self.ifactor, self.dfactor, order=order)
            return _VCABMState(y0, prev_f, prev_t, prev_t[0] + dt_next, prev_phi, order=order)

        # We accept the step. Evaluate f and update phi.
        next_f0 = self.func(next_t.to(p_next[0]), y_next)
        implicit_phi = compute_implicit_phi(phi, next_f0, order + 2)

        # Order adaptation: compare error estimates at orders k-1, k-2 and k+1.
        next_order = order
        if len(prev_t) <= 4 or order < 3:
            next_order = min(order + 1, 3, self.max_order)
        else:
            error_km1 = _compute_error_ratio(
                tuple(dt_cast * (g[order - 1] - g[order - 2]) * iphi_ for iphi_ in implicit_phi_p[order - 1]), tolerance
            )
            error_km2 = _compute_error_ratio(
                tuple(dt_cast * (g[order - 2] - g[order - 3]) * iphi_ for iphi_ in implicit_phi_p[order - 2]), tolerance
            )
            if min(error_km1 + error_km2) < max(error_k):
                next_order = order - 1
            elif order < self.max_order:
                error_kp1 = _compute_error_ratio(
                    tuple(dt_cast * gamma_star[order] * iphi_ for iphi_ in implicit_phi_p[order]), tolerance
                )
                if max(error_kp1) < max(error_k):
                    next_order = order + 1

        # Keep step size constant if increasing order. Else use adaptive step size.
        dt_next = dt if next_order > order else _optimal_step_size(
            dt, error_k, self.safety, self.ifactor, self.dfactor, order=order + 1
        )

        prev_f.appendleft(next_f0)
        prev_t.appendleft(next_t)
        return _VCABMState(p_next, prev_f, prev_t, next_t + dt_next, implicit_phi, order=next_order)
| 7,148 | 39.851429 | 128 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/rk_common.py | # Based on https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/integrate
import collections
from .misc import _scaled_dot_product, _convert_to_tensor
# Butcher tableau container: stage times (alpha), per-stage weights (beta),
# solution weights (c_sol) and error-estimate weights (c_error).
_ButcherTableau = collections.namedtuple('_ButcherTableau', 'alpha beta c_sol c_error')
class _RungeKuttaState(collections.namedtuple('_RungeKuttaState', 'y1, f1, t0, t1, dt, interp_coeff')):
    """Saved state of the Runge Kutta solver.

    Attributes:
        y1: Tensor giving the function value at the end of the last time step.
        f1: Tensor giving derivative at the end of the last time step.
        t0: scalar float64 Tensor giving start of the last time step.
        t1: scalar float64 Tensor giving end of the last time step.
        dt: scalar float64 Tensor giving the size for the next time step.
        interp_coeff: list of Tensors giving coefficients for polynomial
            interpolation between `t0` and `t1`.
    """
def _runge_kutta_step(func, y0, f0, t0, dt, tableau):
    """Take an arbitrary Runge-Kutta step and estimate error.

    Args:
        func: Function to evaluate like `func(t, y)` to compute the time derivative
            of `y`.
        y0: Tensor initial value for the state.
        f0: Tensor initial value for the derivative, computed from `func(t0, y0)`.
        t0: float64 scalar Tensor giving the initial time.
        dt: float64 scalar Tensor giving the size of the desired time step.
        tableau: _ButcherTableau describing how to take the Runge-Kutta step.

    Returns:
        Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
        the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
        estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
        calculating these terms.
    """
    dtype = y0[0].dtype
    device = y0[0].device

    t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
    dt = _convert_to_tensor(dt, dtype=dtype, device=device)

    # k[i] accumulates the stage derivatives for state component i; stage 0 is f0.
    k = tuple(map(lambda x: [x], f0))
    for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
        ti = t0 + alpha_i * dt
        yi = tuple(y0_ + _scaled_dot_product(dt, beta_i, k_) for y0_, k_ in zip(y0, k))
        # Side-effecting comprehension: appends each new stage derivative to k.
        tuple(k_.append(f_) for k_, f_ in zip(k, func(ti, yi)))

    if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
        # This property (true for Dormand-Prince) lets us save a few FLOPs:
        # when it holds, the last stage input `yi` already equals the solution.
        yi = tuple(y0_ + _scaled_dot_product(dt, tableau.c_sol, k_) for y0_, k_ in zip(y0, k))

    y1 = yi
    f1 = tuple(k_[-1] for k_ in k)
    y1_error = tuple(_scaled_dot_product(dt, tableau.c_error, k_) for k_ in k)
    return (y1, f1, y1_error, k)
def rk4_step_func(func, t, dt, y, k1=None):
    """Single step of the classical fourth-order Runge-Kutta scheme.

    `y` is a tuple of state values; the return value is the tuple of
    increments dy over the step (add to `y` to advance).  `k1` may be
    supplied to reuse an already-computed derivative at (t, y).
    """
    if k1 is None:
        k1 = func(t, y)
    mid_t = t + dt / 2
    k2 = func(mid_t, tuple(y_ + dt * k1_ / 2 for y_, k1_ in zip(y, k1)))
    k3 = func(mid_t, tuple(y_ + dt * k2_ / 2 for y_, k2_ in zip(y, k2)))
    k4 = func(t + dt, tuple(y_ + dt * k3_ for y_, k3_ in zip(y, k3)))
    # Weighted combination: (k1 + 2 k2 + 2 k3 + k4) * dt / 6.
    increments = []
    for a, b, c, d in zip(k1, k2, k3, k4):
        increments.append((a + 2 * b + 2 * c + d) * (dt / 6))
    return tuple(increments)
def rk4_alt_step_func(func, t, dt, y, k1=None):
    """Single step of the Kutta 3/8-rule variant of RK4.

    Same order as the classical scheme with a slightly smaller error constant,
    at the cost of a little extra arithmetic.  Returns the tuple of increments
    dy over the step; `k1` may be supplied to reuse f(t, y).
    """
    if k1 is None:
        k1 = func(t, y)
    k2 = func(t + dt / 3, tuple(y_ + dt * k1_ / 3 for y_, k1_ in zip(y, k1)))
    k3 = func(t + dt * 2 / 3, tuple(y_ + dt * (k1_ / -3 + k2_) for y_, k1_, k2_ in zip(y, k1, k2)))
    k4 = func(t + dt, tuple(y_ + dt * (k1_ - k2_ + k3_) for y_, k1_, k2_, k3_ in zip(y, k1, k2, k3)))
    # Weighted combination: (k1 + 3 k2 + 3 k3 + k4) * dt / 8.
    increments = []
    for a, b, c, d in zip(k1, k2, k3, k4):
        increments.append((a + 3 * b + 3 * c + d) * (dt / 8))
    return tuple(increments)
| 3,673 | 45.506329 | 106 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/__init__.py | from .odeint import odeint
from .odeint_skip_step import odeint_skip_step
from .odeint_stochastic_end import odeint_stochastic_end
from .odeint_stochastic_end_v2 import odeint_stochastic_end_v2
from .odeint_stochastic_end_v3 import odeint_stochastic_end_v3
from .odeint_stochastic_end_normal import odeint_stochastic_end_normal
from .odeint_adjoint_skip_step import odeint_adjoint_skip_step
from .odeint_adjoint_stochastic_end import odeint_adjoint_stochastic_end
from .odeint_adjoint_stochastic_end_v2 import odeint_adjoint_stochastic_end_v2
from .odeint_adjoint_stochastic_end_v3 import odeint_adjoint_stochastic_end_v3
from .odeint_adjoint_stochastic_end_normal import odeint_adjoint_stochastic_end_normal
from .odeint_stochastic_end_v2_inference import odeint_stochastic_end_v2_inference
from .adjoint import odeint_adjoint
| 828 | 58.214286 | 86 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_v2.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
from torch.distributions import normal
from torch.distributions import uniform
# Name -> solver-class registry, kept in parity with the other odeint_* modules.
# NOTE(review): not referenced in this module — the wrapper below delegates to
# `odeint_adjoint`, which resolves the method name itself.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end_v2(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, shrink_proportion = 0.5, shrink_std = 0.02 , mode='train', min_length=0.01 ):
    """Adjoint ODE integration over a randomly shrunken time interval.

    The end point is pulled in to roughly `shrink_proportion` of the interval:
    with L = |t1 - t0| and p = shrink_proportion, training mode samples the end
    uniformly from [t0 + p*L - shrink_std, t0 + p*L + shrink_std] (floored at
    t0 + min_length), while any other mode uses exactly t0 + p*L.  Decreasing
    time intervals are handled by flipping to forward time and flipping back
    before integration.

    NOTE(review): despite the name, `shrink_std` is the half-width of a
    *uniform* sampling window (a Normal variant is disabled below).

    Args:
        func: maps a scalar time Tensor and state `y` to dy/dt.
        y0: Tensor or tuple of Tensors giving the state at `actual_t[0]`.
        actual_t: 1-D Tensor of two time points; cloned, never mutated.
        rtol, atol: error tolerances forwarded to the solver.
        method: optional solver name.
        options: optional solver configuration dict.
        shrink_proportion, shrink_std, mode, min_length: control the random
            end-time sampling described above.

    Returns:
        Output of `odeint_adjoint` over the shrunken two-point time interval.
    """
    t = actual_t.clone()
    # Build the time tensor on the same dtype/device as the state.
    if isinstance(y0, tuple):
        integration_time = t.type_as(y0[0])
    else:
        integration_time = t.type_as(y0)
    # Normalize to forward time, remembering whether to flip back.
    rev = False
    if t[1]<t[0]:
        t = reverse_time(t)
        rev = True
    range_time = (t[1]-t[0]) * shrink_proportion
    # Uniform window of half-width shrink_std around the shrunken end point.
    # (A normal.Normal alternative existed here but is disabled.)
    m = uniform.Uniform(t[0] + range_time - shrink_std , t[0] + range_time + shrink_std)
    integration_time[0]=t[0]
    if mode=='train':
        # Randomized end time, but never shorter than min_length.
        integration_time[1]=max(m.sample(), t[0] + min_length)
    else:
        # Deterministic shrunken end time at evaluation.
        integration_time[1]= t[0] + range_time
    if rev:
        integration_time = reverse_time(integration_time)
    out = odeint_adjoint( func, y0, integration_time,rtol=rtol,atol=atol,method=method,options=options)
    return out
def reverse_time(t):
    """Swap the two entries of the 2-element time tensor `t` in place.

    Mutates `t` and also returns it for convenience.
    """
    start, end = t[0].item(), t[1].item()
    t[0] = end
    t[1] = start
    return t
| 4,062 | 35.276786 | 184 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/fixed_grid.py | from .solvers import FixedGridODESolver
from . import rk_common
class Euler(FixedGridODESolver):
    """Explicit (forward) Euler: dy = dt * f(t, y)."""

    def step_func(self, func, t, dt, y):
        return tuple(dt * f_ for f_ in func(t, y))

    @property
    def order(self):
        # First-order accurate.
        return 1
class Midpoint(FixedGridODESolver):
    """Explicit midpoint rule: evaluate f at an Euler half-step."""

    def step_func(self, func, t, dt, y):
        # Half Euler step to the interval midpoint, then a full step using
        # the midpoint derivative.
        y_mid = tuple(y_ + f_ * dt / 2 for y_, f_ in zip(y, func(t, y)))
        return tuple(dt * f_ for f_ in func(t + dt / 2, y_mid))

    @property
    def order(self):
        # Second-order accurate.
        return 2
class RK4(FixedGridODESolver):
    """Fourth-order Runge-Kutta using the 3/8-rule variant step."""

    def step_func(self, func, t, dt, y):
        return rk_common.rk4_alt_step_func(func, t, dt, y)

    @property
    def order(self):
        # Fourth-order accurate.
        return 4
| 702 | 19.676471 | 72 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/solvers.py | import abc
import torch
from .misc import _assert_increasing, _handle_unused_kwargs
class AdaptiveStepsizeODESolver(object):
    """Base class for adaptive-step ODE solvers.

    Subclasses implement `advance(next_t)` (integrate/interpolate up to
    `next_t`) and may override `before_integrate` to set up solver state.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, func, y0, atol, rtol, **unused_kwargs):
        # func: dy/dt callable; y0: tuple of state tensors; atol/rtol: tolerances.
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.func = func
        self.y0 = y0
        self.atol = atol
        self.rtol = rtol

    def before_integrate(self, t):
        # Hook for subclasses; called once with the full float64 time grid.
        pass

    @abc.abstractmethod
    def advance(self, next_t):
        raise NotImplementedError

    def integrate(self, t):
        """Return the solution at each time of the strictly increasing grid `t`."""
        _assert_increasing(t)
        solution = [self.y0]
        t = t.to(self.y0[0].device, torch.float64)
        self.before_integrate(t)
        for i in range(1, len(t)):
            y = self.advance(t[i])
            solution.append(y)
        # Stack each state component along a leading time dimension.
        return tuple(map(torch.stack, tuple(zip(*solution))))
class FixedGridODESolver(object):
    """Base class for fixed-grid (non-adaptive) ODE solvers.

    Subclasses provide `step_func` (the increment over one step) and `order`.
    Requested output times that fall between grid points are filled in by
    linear interpolation between the bracketing grid solutions.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, func, y0, step_size=None, grid_constructor=None, **unused_kwargs):
        # Fixed-grid solvers ignore tolerances; drop them silently.
        unused_kwargs.pop('rtol', None)
        unused_kwargs.pop('atol', None)
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.func = func
        self.y0 = y0

        # `step_size` and `grid_constructor` are mutually exclusive ways of
        # defining the integration grid.
        if step_size is not None and grid_constructor is not None:
            raise ValueError("step_size and grid_constructor are exclusive arguments.")
        elif step_size is not None:
            self.grid_constructor = self._grid_constructor_from_step_size(step_size)
        elif grid_constructor is not None:
            # Fix: actually use a user-supplied grid_constructor (previously
            # this case fell through to the "exclusive arguments" error, so
            # the parameter could never be used).
            self.grid_constructor = grid_constructor
        else:
            # Default: integrate exactly on the requested output times.
            self.grid_constructor = lambda f, y0, t: t

    def _grid_constructor_from_step_size(self, step_size):
        # Build a uniform grid of the given step covering [t[0], t[-1]],
        # clamping the final point so we never step past the end time.
        def _grid_constructor(func, y0, t):
            start_time = t[0]
            end_time = t[-1]
            niters = torch.ceil((end_time - start_time) / step_size + 1).item()
            t_infer = torch.arange(0, niters).to(t) * step_size + start_time
            if t_infer[-1] > t[-1]:
                t_infer[-1] = t[-1]
            return t_infer

        return _grid_constructor

    @property
    @abc.abstractmethod
    def order(self):
        pass

    @abc.abstractmethod
    def step_func(self, func, t, dt, y):
        pass

    def integrate(self, t):
        """Solve at the requested, strictly increasing, time points `t`."""
        _assert_increasing(t)
        t = t.type_as(self.y0[0])
        time_grid = self.grid_constructor(self.func, self.y0, t)
        assert time_grid[0] == t[0] and time_grid[-1] == t[-1]
        time_grid = time_grid.to(self.y0[0])

        solution = [self.y0]

        j = 1
        y0 = self.y0
        for t0, t1 in zip(time_grid[:-1], time_grid[1:]):
            dy = self.step_func(self.func, t0, t1 - t0, y0)
            y1 = tuple(y0_ + dy_ for y0_, dy_ in zip(y0, dy))

            # Emit (interpolated) solutions for every requested time that
            # falls inside this grid step.
            while j < len(t) and t1 >= t[j]:
                solution.append(self._linear_interp(t0, t1, y0, y1, t[j]))
                j += 1

            y0 = y1

        return tuple(map(torch.stack, tuple(zip(*solution))))

    def _linear_interp(self, t0, t1, y0, y1, t):
        # Linear interpolation of each state tensor between (t0, y0) and (t1, y1).
        if t == t0:
            return y0
        if t == t1:
            return y1
        t0, t1, t = t0.to(y0[0]), t1.to(y0[0]), t.to(y0[0])
        slope = tuple((y1_ - y0_) / (t1 - t0) for y0_, y1_, in zip(y0, y1))
        return tuple(y0_ + slope_ * (t - t0) for y0_, slope_ in zip(y0, slope))
| 3,276 | 29.06422 | 89 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v2.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
from torch.distributions import uniform
# Registry mapping the ``method`` string accepted by the odeint wrappers to
# the solver class that implements it.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_v2(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion = 0.5, shrink_std = 0.02, method=None, options=None, mode='train',min_length=0.01 ):
    """Integrate an ODE over a (possibly) randomly shortened interval.

    For a two-point time tensor ``actual_t = [t0, t1]`` this solves
    ``dy/dt = func(t, y)`` from ``t0`` to a stochastic end point drawn
    uniformly within ``shrink_std`` of ``t0 + shrink_proportion*(t1 - t0)``
    during training, or to exactly that shrunken end point otherwise.
    Time tensors with any other length fall back to a plain ``odeint`` call
    over ``actual_t``.

    Args:
        func: callable mapping ``(t, y)`` to the time derivative of ``y``.
        y0: initial state at ``actual_t[0]``.
        actual_t: 1-D time tensor; stochastic shrinking only applies when it
            has exactly two entries.
        rtol, atol: tolerances; NOTE(review): not forwarded — the nested
            ``odeint`` call below uses its own defaults.
        shrink_proportion: fraction of the interval used as the nominal end.
        shrink_std: half-width of the uniform end-point distribution.
        method, options: accepted for API symmetry; NOTE(review): also not
            forwarded to the nested ``odeint`` call.
        mode: ``'train'`` samples the end point; any other value uses the
            deterministic shrunken end.
        min_length: minimum integration length from ``t0`` in train mode.

    Returns:
        The ``odeint`` solution over the shortened integration interval.
    """
    t = actual_t.clone()

    if t.size(0)!=2:
        return odeint(func,y0,actual_t)

    # NOTE(review): ``type_as`` returns ``t`` itself when dtypes already
    # match, so ``integration_time`` may alias ``t``; the in-place
    # ``reverse_time`` below then mutates both tensors. Confirm intended.
    integration_time = t.type_as(y0)#integration_time.type_as(x)

    rev = False
    if t[1]<t[0]:
        t = reverse_time(t)
        rev = True

    # Nominal shortened interval length.
    range_time = (t[1]-t[0]) * shrink_proportion

    #m = normal.Normal(t[0]+shrink_proportion, shrink_std)
    m = uniform.Uniform(t[0] + range_time - shrink_std , t[0] + range_time + shrink_std)

    integration_time[0]=t[0]

    if mode=='train':
        # Sampled end point, but never closer than ``min_length`` to t0.
        integration_time[1]=max(m.sample(), t[0] + min_length)
    else:
        integration_time[1]= t[0] + range_time

    # NOTE(review): the original (decreasing) time order is restored only in
    # train mode; in eval mode a reversed input keeps forward order —
    # confirm whether that asymmetry is deliberate.
    if rev and mode=='train':
        integration_time = reverse_time(integration_time)

    out = odeint( func, y0, integration_time)

    return out
def reverse_time(t):
    """Swap the two entries of a length-2 time tensor *t* in place.

    Returns the same tensor object for convenient chaining.
    """
    first, last = t[0].item(), t[1].item()
    t[0] = last
    t[1] = first
    return t
#def odeint_skip_step(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# tensor_input, func, y0, t = _check_inputs(func, y0, t)
#
# if options is None:
# options = {}
# elif method is None:
# raise ValueError('cannot supply `options` without specifying `method`')
#
# if method is None:
# method = 'dopri5'
#
#
# #print("y0")
# #print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# solution = solver.integrate(t)
#
#
# integration_time = t #integration_time.type_as(x)
# range_time = t[1]-t[0]
# skip = range_time * skip_proportion
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
# rand_points[0]=t[0]
# rand_points[num_skips-1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
# print(integration_time)
# print(y0)
#
# print("=======================================")
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# solution = out
# y0 = (out[0][1],)
# for i in range(1,rand_points.shape[0]-1):
# integration_time[0]=rand_points[i] + skip
# integration_time[1]=rand_points[i+1]
#
# if integration_time[1]>integration_time[0]:
# print(integration_time)
# print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# y0 = (out[0][1],)
#
# solution[0][1] = out[0][1]
#
#
# if tensor_input:
# solution = solution[0]
#
#
# return solution
| 7,458 | 35.925743 | 175 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/dopri5.py | # Based on https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/integrate
import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveStepsizeODESolver
from .interp import _interp_fit, _interp_evaluate
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
_DORMAND_PRINCE_SHAMPINE_TABLEAU = _ButcherTableau(
alpha=[1 / 5, 3 / 10, 4 / 5, 8 / 9, 1., 1.],
beta=[
[1 / 5],
[3 / 40, 9 / 40],
[44 / 45, -56 / 15, 32 / 9],
[19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729],
[9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
],
c_sol=[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84, 0],
c_error=[
35 / 384 - 1951 / 21600,
0,
500 / 1113 - 22642 / 50085,
125 / 192 - 451 / 720,
-2187 / 6784 - -12231 / 42400,
11 / 84 - 649 / 6300,
-1. / 60.,
],
)
DPS_C_MID = [
6025192743 / 30085553152 / 2, 0, 51252292925 / 65400821598 / 2, -2691868925 / 45128329728 / 2,
187940372067 / 1594534317056 / 2, -1776094331 / 19743644256 / 2, 11237099 / 235043384 / 2
]
def _interp_fit_dopri5(y0, y1, k, dt, tableau=_DORMAND_PRINCE_SHAMPINE_TABLEAU):
    """Build interpolation coefficients for a completed Dopri5 step.

    Uses the stage derivatives ``k`` to estimate the midpoint state, then
    fits the interpolant from endpoints, midpoint, and endpoint slopes.
    """
    dt = dt.type_as(y0[0])
    # Midpoint estimate for every state component.
    y_mid = []
    for y0_, stages in zip(y0, k):
        y_mid.append(y0_ + _scaled_dot_product(dt, DPS_C_MID, stages))
    # First and last stage derivatives are the endpoint slopes.
    f0 = tuple(stages[0] for stages in k)
    f1 = tuple(stages[-1] for stages in k)
    return _interp_fit(y0, y1, tuple(y_mid), f0, f1, dt)
def _abs_square(x):
return torch.mul(x, x)
def _ta_append(list_of_tensors, value):
"""Append a value to the end of a list of PyTorch tensors."""
list_of_tensors.append(value)
return list_of_tensors
class Dopri5Solver(AdaptiveStepsizeODESolver):
    """Adaptive-step solver using the Dormand-Prince-Shampine tableau,
    with interpolation between accepted steps for output times."""

    def __init__(
        self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
        **unused_kwargs
    ):
        # ``safety``/``ifactor``/``dfactor`` shape the adaptive step-size
        # update; ``max_num_steps`` bounds work done per requested time.
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.func = func
        self.y0 = y0
        # Broadcast scalar tolerances to one entry per state component.
        self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
        self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
        self.first_step = first_step
        self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
        self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
        self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
        self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)

    def before_integrate(self, t):
        """Initialize the Runge-Kutta state at ``t[0]``, choosing an initial
        step size automatically unless ``first_step`` was supplied."""
        f0 = self.func(t[0].type_as(self.y0[0]), self.y0)
        if self.first_step is None:
            first_step = _select_initial_step(self.func, t[0], self.y0, 4, self.rtol[0], self.atol[0], f0=f0).to(t)
        else:
            first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
        self.rk_state = _RungeKuttaState(self.y0, f0, t[0], t[0], first_step, interp_coeff=[self.y0] * 5)

    def advance(self, next_t):
        """Interpolate through the next time point, integrating as necessary."""
        n_steps = 0
        while next_t > self.rk_state.t1:
            assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
            self.rk_state = self._adaptive_dopri5_step(self.rk_state)
            n_steps += 1
        # Evaluate the interpolant of the step that covers ``next_t``.
        return _interp_evaluate(self.rk_state.interp_coeff, self.rk_state.t0, self.rk_state.t1, next_t)

    def _adaptive_dopri5_step(self, rk_state):
        """Take an adaptive Runge-Kutta step to integrate the ODE."""
        y0, f0, _, t0, dt, interp_coeff = rk_state
        ########################################################
        #                      Assertions                      #
        ########################################################
        assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
        for y0_ in y0:
            assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
        y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_DORMAND_PRINCE_SHAMPINE_TABLEAU)

        ########################################################
        #                     Error Ratio                      #
        ########################################################
        mean_sq_error_ratio = _compute_error_ratio(y1_error, atol=self.atol, rtol=self.rtol, y0=y0, y1=y1)
        # Accept the step only when every component's error is within tolerance.
        accept_step = (torch.tensor(mean_sq_error_ratio) <= 1).all()

        ########################################################
        #                   Update RK State                    #
        ########################################################
        # On rejection, keep the old state and retry with a smaller dt.
        y_next = y1 if accept_step else y0
        f_next = f1 if accept_step else f0
        t_next = t0 + dt if accept_step else t0
        interp_coeff = _interp_fit_dopri5(y0, y1, k, dt) if accept_step else interp_coeff
        dt_next = _optimal_step_size(
            dt, mean_sq_error_ratio, safety=self.safety, ifactor=self.ifactor, dfactor=self.dfactor, order=5
        )
        rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, interp_coeff)
        return rk_state
| 5,566 | 44.260163 | 118 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/fixed_adams.py | import sys
import collections
from .solvers import FixedGridODESolver
from .misc import _scaled_dot_product, _has_converged
from . import rk_common
_BASHFORTH_COEFFICIENTS = [
[], # order 0
[11],
[3, -1],
[23, -16, 5],
[55, -59, 37, -9],
[1901, -2774, 2616, -1274, 251],
[4277, -7923, 9982, -7298, 2877, -475],
[198721, -447288, 705549, -688256, 407139, -134472, 19087],
[434241, -1152169, 2183877, -2664477, 2102243, -1041723, 295767, -36799],
[14097247, -43125206, 95476786, -139855262, 137968480, -91172642, 38833486, -9664106, 1070017],
[30277247, -104995189, 265932680, -454661776, 538363838, -444772162, 252618224, -94307320, 20884811, -2082753],
[
2132509567, -8271795124, 23591063805, -46113029016, 63716378958, -63176201472, 44857168434, -22329634920,
7417904451, -1479574348, 134211265
],
[
4527766399, -19433810163, 61633227185, -135579356757, 214139355366, -247741639374, 211103573298, -131365867290,
58189107627, -17410248271, 3158642445, -262747265
],
[
13064406523627, -61497552797274, 214696591002612, -524924579905150, 932884546055895, -1233589244941764,
1226443086129408, -915883387152444, 507140369728425, -202322913738370, 55060974662412, -9160551085734,
703604254357
],
[
27511554976875, -140970750679621, 537247052515662, -1445313351681906, 2854429571790805, -4246767353305755,
4825671323488452, -4204551925534524, 2793869602879077, -1393306307155755, 505586141196430, -126174972681906,
19382853593787, -1382741929621
],
[
173233498598849, -960122866404112, 3966421670215481, -11643637530577472, 25298910337081429, -41825269932507728,
53471026659940509, -53246738660646912, 41280216336284259, -24704503655607728, 11205849753515179,
-3728807256577472, 859236476684231, -122594813904112, 8164168737599
],
[
362555126427073, -2161567671248849, 9622096909515337, -30607373860520569, 72558117072259733,
-131963191940828581, 187463140112902893, -210020588912321949, 186087544263596643, -129930094104237331,
70724351582843483, -29417910911251819, 9038571752734087, -1934443196892599, 257650275915823, -16088129229375
],
[
192996103681340479, -1231887339593444974, 5878428128276811750, -20141834622844109630, 51733880057282977010,
-102651404730855807942, 160414858999474733422, -199694296833704562550, 199061418623907202560,
-158848144481581407370, 100878076849144434322, -50353311405771659322, 19338911944324897550,
-5518639984393844930, 1102560345141059610, -137692773163513234, 8092989203533249
],
[
401972381695456831, -2735437642844079789, 13930159965811142228, -51150187791975812900, 141500575026572531760,
-304188128232928718008, 518600355541383671092, -710171024091234303204, 786600875277595877750,
-706174326992944287370, 512538584122114046748, -298477260353977522892, 137563142659866897224,
-49070094880794267600, 13071639236569712860, -2448689255584545196, 287848942064256339, -15980174332775873
],
[
333374427829017307697, -2409687649238345289684, 13044139139831833251471, -51099831122607588046344,
151474888613495715415020, -350702929608291455167896, 647758157491921902292692, -967713746544629658690408,
1179078743786280451953222, -1176161829956768365219840, 960377035444205950813626, -639182123082298748001432,
343690461612471516746028, -147118738993288163742312, 48988597853073465932820, -12236035290567356418552,
2157574942881818312049, -239560589366324764716, 12600467236042756559
],
[
691668239157222107697, -5292843584961252933125, 30349492858024727686755, -126346544855927856134295,
399537307669842150996468, -991168450545135070835076, 1971629028083798845750380, -3191065388846318679544380,
4241614331208149947151790, -4654326468801478894406214, 4222756879776354065593786, -3161821089800186539248210,
1943018818982002395655620, -970350191086531368649620, 387739787034699092364924, -121059601023985433003532,
28462032496476316665705, -4740335757093710713245, 498669220956647866875, -24919383499187492303
],
]
_MOULTON_COEFFICIENTS = [
[], # order 0
[1],
[1, 1],
[5, 8, -1],
[9, 19, -5, 1],
[251, 646, -264, 106, -19],
[475, 1427, -798, 482, -173, 27],
[19087, 65112, -46461, 37504, -20211, 6312, -863],
[36799, 139849, -121797, 123133, -88547, 41499, -11351, 1375],
[1070017, 4467094, -4604594, 5595358, -5033120, 3146338, -1291214, 312874, -33953],
[2082753, 9449717, -11271304, 16002320, -17283646, 13510082, -7394032, 2687864, -583435, 57281],
[
134211265, 656185652, -890175549, 1446205080, -1823311566, 1710774528, -1170597042, 567450984, -184776195,
36284876, -3250433
],
[
262747265, 1374799219, -2092490673, 3828828885, -5519460582, 6043521486, -4963166514, 3007739418, -1305971115,
384709327, -68928781, 5675265
],
[
703604254357, 3917551216986, -6616420957428, 13465774256510, -21847538039895, 27345870698436, -26204344465152,
19058185652796, -10344711794985, 4063327863170, -1092096992268, 179842822566, -13695779093
],
[
1382741929621, 8153167962181, -15141235084110, 33928990133618, -61188680131285, 86180228689563, -94393338653892,
80101021029180, -52177910882661, 25620259777835, -9181635605134, 2268078814386, -345457086395, 24466579093
],
[
8164168737599, 50770967534864, -102885148956217, 251724894607936, -499547203754837, 781911618071632,
-963605400824733, 934600833490944, -710312834197347, 418551804601264, -187504936597931, 61759426692544,
-14110480969927, 1998759236336, -132282840127
],
[
16088129229375, 105145058757073, -230992163723849, 612744541065337, -1326978663058069, 2285168598349733,
-3129453071993581, 3414941728852893, -2966365730265699, 2039345879546643, -1096355235402331, 451403108933483,
-137515713789319, 29219384284087, -3867689367599, 240208245823
],
[
8092989203533249, 55415287221275246, -131240807912923110, 375195469874202430, -880520318434977010,
1654462865819232198, -2492570347928318318, 3022404969160106870, -2953729295811279360, 2320851086013919370,
-1455690451266780818, 719242466216944698, -273894214307914510, 77597639915764930, -15407325991235610,
1913813460537746, -111956703448001
],
[
15980174332775873, 114329243705491117, -290470969929371220, 890337710266029860, -2250854333681641520,
4582441343348851896, -7532171919277411636, 10047287575124288740, -10910555637627652470, 9644799218032932490,
-6913858539337636636, 3985516155854664396, -1821304040326216520, 645008976643217360, -170761422500096220,
31816981024600492, -3722582669836627, 205804074290625
],
[
12600467236042756559, 93965550344204933076, -255007751875033918095, 834286388106402145800,
-2260420115705863623660, 4956655592790542146968, -8827052559979384209108, 12845814402199484797800,
-15345231910046032448070, 15072781455122686545920, -12155867625610599812538, 8008520809622324571288,
-4269779992576330506540, 1814584564159445787240, -600505972582990474260, 149186846171741510136,
-26182538841925312881, 2895045518506940460, -151711881512390095
],
[
24919383499187492303, 193280569173472261637, -558160720115629395555, 1941395668950986461335,
-5612131802364455926260, 13187185898439270330756, -25293146116627869170796, 39878419226784442421820,
-51970649453670274135470, 56154678684618739939910, -50320851025594566473146, 37297227252822858381906,
-22726350407538133839300, 11268210124987992327060, -4474886658024166985340, 1389665263296211699212,
-325187970422032795497, 53935307402575440285, -5652892248087175675, 281550972898020815
],
]
_DIVISOR = [
None, 11, 2, 12, 24, 720, 1440, 60480, 120960, 3628800, 7257600, 479001600, 958003200, 2615348736000, 5230697472000,
31384184832000, 62768369664000, 32011868528640000, 64023737057280000, 51090942171709440000, 102181884343418880000
]
_MIN_ORDER = 4
_MAX_ORDER = 12
_MAX_ITERS = 4
class AdamsBashforthMoulton(FixedGridODESolver):
    """Fixed-grid Adams-Bashforth-Moulton predictor-corrector solver.

    Bootstraps with RK4 steps until enough derivative history is available,
    then uses the Adams-Bashforth predictor and (optionally) fixed-point
    Adams-Moulton corrector iterations.
    """

    def __init__(
        self, func, y0, rtol=1e-3, atol=1e-4, implicit=True, max_iters=_MAX_ITERS, max_order=_MAX_ORDER, **kwargs
    ):
        super(AdamsBashforthMoulton, self).__init__(func, y0, **kwargs)

        # Tolerances govern convergence of the corrector iterations.
        self.rtol = rtol
        self.atol = atol
        # ``implicit`` toggles the Moulton corrector; False gives pure AB.
        self.implicit = implicit
        self.max_iters = max_iters
        self.max_order = int(min(max_order, _MAX_ORDER))
        # Rolling window of recent derivative evaluations (newest first).
        self.prev_f = collections.deque(maxlen=self.max_order - 1)
        self.prev_t = None

    def _update_history(self, t, f):
        """Record derivative ``f`` at time ``t`` unless ``t`` repeats."""
        if self.prev_t is None or self.prev_t != t:
            self.prev_f.appendleft(f)
            self.prev_t = t

    def step_func(self, func, t, dt, y):
        """One predictor(-corrector) step of size ``dt``; returns ``dy``."""
        self._update_history(t, func(t, y))
        order = min(len(self.prev_f), self.max_order - 1)
        if order < _MIN_ORDER - 1:
            # Not enough history yet: compute using RK4.
            dy = rk_common.rk4_alt_step_func(func, t, dt, y, k1=self.prev_f[0])
            return dy
        else:
            # Adams-Bashforth predictor.
            bashforth_coeffs = _BASHFORTH_COEFFICIENTS[order]
            ab_div = _DIVISOR[order]
            dy = tuple(dt * _scaled_dot_product(1 / ab_div, bashforth_coeffs, f_) for f_ in zip(*self.prev_f))

            # Adams-Moulton corrector (fixed-point iteration on dy).
            if self.implicit:
                moulton_coeffs = _MOULTON_COEFFICIENTS[order + 1]
                am_div = _DIVISOR[order + 1]
                # History contribution is constant across iterations.
                delta = tuple(dt * _scaled_dot_product(1 / am_div, moulton_coeffs[1:], f_) for f_ in zip(*self.prev_f))
                converged = False
                for _ in range(self.max_iters):
                    dy_old = dy
                    f = func(t + dt, tuple(y_ + dy_ for y_, dy_ in zip(y, dy)))
                    dy = tuple(dt * (moulton_coeffs[0] / am_div) * f_ + delta_ for f_, delta_ in zip(f, delta))
                    converged = _has_converged(dy_old, dy, self.rtol, self.atol)
                    if converged:
                        break
                if not converged:
                    print('Warning: Functional iteration did not converge. Solution may be incorrect.', file=sys.stderr)
                # Replace the predictor-time entry with the corrected f(t+dt).
                self.prev_f.pop()
                self._update_history(t, f)
            return dy

    @property
    def order(self):
        return 4
class AdamsBashforth(AdamsBashforthMoulton):
    """Explicit Adams-Bashforth solver: the predictor-only variant of
    ``AdamsBashforthMoulton`` (no implicit corrector iterations)."""

    def __init__(self, func, y0, **kwargs):
        # Identical to the parent, with the Moulton corrector disabled.
        super().__init__(func, y0, implicit=False, **kwargs)
| 10,784 | 49.872642 | 120 | py |
FragmentVC | FragmentVC-main/convert_batch.py | #!/usr/bin/env python3
"""Convert multiple pairs."""
import warnings
from pathlib import Path
from functools import partial
from multiprocessing import Pool, cpu_count
import yaml
import torch
import numpy as np
import soundfile as sf
from jsonargparse import ArgumentParser, ActionConfigFile
from data import load_wav, log_mel_spectrogram, plot_mel, plot_attn
from models import load_pretrained_wav2vec
def parse_args():
    """Collect command-line arguments into a plain dict."""
    parser = ArgumentParser()
    # Positional inputs: conversion-pair description file and output folder.
    parser.add_argument("info_path", type=str)
    parser.add_argument("output_dir", type=str, default=".")
    # Pretrained checkpoint locations.
    parser.add_argument("-c", "--ckpt_path", default="checkpoints/fragmentvc.pt")
    parser.add_argument("-w", "--wav2vec_path", default="checkpoints/wav2vec_small.pt")
    parser.add_argument("-v", "--vocoder_path", default="checkpoints/vocoder.pt")
    # Audio / mel-spectrogram settings.
    parser.add_argument("--sample_rate", type=int, default=16000)
    parser.add_argument("--preemph", type=float, default=0.97)
    for flag, default in [
        ("--hop_len", 326),
        ("--win_len", 1304),
        ("--n_fft", 1304),
        ("--n_mels", 80),
        ("--f_min", 80),
    ]:
        parser.add_argument(flag, type=int, default=default)
    # Optional YAML/JSON config file overriding the audio settings.
    parser.add_argument("--audio_config", action=ActionConfigFile)
    return vars(parser.parse_args())
def main(
    info_path,
    output_dir,
    ckpt_path,
    wav2vec_path,
    vocoder_path,
    sample_rate,
    preemph,
    hop_len,
    win_len,
    n_fft,
    n_mels,
    f_min,
    **kwargs,
):
    """Batch voice conversion: for every pair listed in the YAML file at
    ``info_path`` (one source utterance, several target utterances), convert
    the source to the target speaker and write wav + plot files into
    ``output_dir``."""

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the three pretrained components (feature extractor, converter,
    # vocoder); converter and vocoder are TorchScript archives.
    wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
    print("[INFO] Wav2Vec is loaded from", wav2vec_path)

    model = torch.jit.load(ckpt_path).to(device).eval()
    print("[INFO] FragmentVC is loaded from", ckpt_path)

    vocoder = torch.jit.load(vocoder_path).to(device).eval()
    print("[INFO] Vocoder is loaded from", vocoder_path)

    # Pre-bind the audio settings so the workers only need a file path.
    path2wav = partial(load_wav, sample_rate=sample_rate)
    wav2mel = partial(
        log_mel_spectrogram,
        preemph=preemph,
        sample_rate=sample_rate,
        n_mels=n_mels,
        n_fft=n_fft,
        hop_length=hop_len,
        win_length=win_len,
        f_min=f_min,
    )

    with open(info_path) as f:
        infos = yaml.load(f, Loader=yaml.FullLoader)

    out_mels = []
    attns = []
    for pair_name, pair in infos.items():
        src_wav = load_wav(pair["source"], sample_rate, trim=True)
        src_wav = torch.FloatTensor(src_wav).unsqueeze(0).to(device)

        # Load and featurize all target utterances in parallel, then
        # concatenate their mel-spectrograms along the time axis.
        with Pool(cpu_count()) as pool:
            tgt_wavs = pool.map(path2wav, pair["target"])
            tgt_mels = pool.map(wav2mel, tgt_wavs)
        tgt_mel = np.concatenate(tgt_mels, axis=0)
        tgt_mel = torch.FloatTensor(tgt_mel.T).unsqueeze(0).to(device)

        with torch.no_grad():
            src_feat = wav2vec.extract_features(src_wav, None)[0]
            out_mel, attn = model(src_feat, tgt_mel)

        out_mel = out_mel.transpose(1, 2).squeeze(0)
        out_mels.append(out_mel)
        attns.append(attn)
        print(f"[INFO] Pair {pair_name} converted")

    # Vocode all converted spectrograms in one batch.
    print("[INFO] Generating waveforms...")
    with torch.no_grad():
        out_wavs = vocoder.generate(out_mels)
    print("[INFO] Waveforms generated")

    out_dir = Path(output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    for pair_name, out_mel, out_wav, attn in zip(
        infos.keys(), out_mels, out_wavs, attns
    ):
        out_wav = out_wav.cpu().numpy()
        out_path = Path(out_dir, pair_name)
        # Save diagnostics (mel + attention plots) alongside the audio.
        plot_mel(out_mel, filename=out_path.with_suffix(".mel.png"))
        plot_attn(attn, filename=out_path.with_suffix(".attn.png"))
        sf.write(out_path.with_suffix(".wav"), out_wav, sample_rate)
if __name__ == "__main__":
    # Suppress all library warnings for cleaner command-line output.
    warnings.filterwarnings("ignore")
    main(**parse_args())
| 3,966 | 29.05303 | 87 | py |
FragmentVC | FragmentVC-main/convert.py | #!/usr/bin/env python3
"""Convert using one source utterance and multiple target utterances."""
import warnings
from datetime import datetime
from pathlib import Path
from copy import deepcopy
import torch
import numpy as np
import soundfile as sf
from jsonargparse import ArgumentParser, ActionConfigFile
import sox
from data import load_wav, log_mel_spectrogram, plot_mel, plot_attn
from models import load_pretrained_wav2vec
def parse_args():
    """Collect command-line arguments into a plain dict."""
    parser = ArgumentParser()
    # Positional inputs: one source utterance, one or more target utterances.
    parser.add_argument("source_path", type=str)
    parser.add_argument("target_paths", type=str, nargs="+")
    # Required pretrained checkpoint locations.
    for short, long in [
        ("-w", "--wav2vec_path"),
        ("-c", "--ckpt_path"),
        ("-v", "--vocoder_path"),
    ]:
        parser.add_argument(short, long, type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, default="output.wav")
    # Audio / mel-spectrogram settings.
    parser.add_argument("--sample_rate", type=int, default=16000)
    parser.add_argument("--preemph", type=float, default=0.97)
    for flag, default in [
        ("--hop_len", 326),
        ("--win_len", 1304),
        ("--n_fft", 1304),
        ("--n_mels", 80),
        ("--f_min", 80),
    ]:
        parser.add_argument(flag, type=int, default=default)
    # Optional YAML/JSON config file overriding the audio settings.
    parser.add_argument("--audio_config", action=ActionConfigFile)
    return vars(parser.parse_args())
def main(
    source_path,
    target_paths,
    wav2vec_path,
    ckpt_path,
    vocoder_path,
    output_path,
    sample_rate,
    preemph,
    hop_len,
    win_len,
    n_fft,
    n_mels,
    f_min,
    **kwargs,
):
    """Single-pair voice conversion: convert one source utterance using one
    or more target utterances, writing the waveform plus mel/attention plots
    next to ``output_path``; per-stage timings are printed throughout."""

    begin_time = step_moment = datetime.now()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the three pretrained components (feature extractor, converter,
    # vocoder); converter and vocoder are TorchScript archives.
    wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
    print("[INFO] Wav2Vec is loaded from", wav2vec_path)

    model = torch.jit.load(ckpt_path).to(device).eval()
    print("[INFO] FragmentVC is loaded from", ckpt_path)

    vocoder = torch.jit.load(vocoder_path).to(device).eval()
    print("[INFO] Vocoder is loaded from", vocoder_path)

    elaspe_time = datetime.now() - step_moment
    step_moment = datetime.now()
    print("[INFO] elasped time", elaspe_time.total_seconds())

    # Voice-activity trimming at both ends of each utterance.
    tfm = sox.Transformer()
    tfm.vad(location=1)
    tfm.vad(location=-1)

    src_wav = load_wav(source_path, sample_rate)
    src_wav = deepcopy(tfm.build_array(input_array=src_wav, sample_rate_in=sample_rate))
    src_wav = torch.FloatTensor(src_wav).unsqueeze(0).to(device)
    print("[INFO] source waveform shape:", src_wav.shape)

    # Trim and featurize every target utterance, then concatenate their
    # mel-spectrograms along the time axis.
    tgt_mels = []
    for tgt_path in target_paths:
        tgt_wav = load_wav(tgt_path, sample_rate)
        tgt_wav = tfm.build_array(input_array=tgt_wav, sample_rate_in=sample_rate)
        tgt_wav = deepcopy(tgt_wav)
        tgt_mel = log_mel_spectrogram(
            tgt_wav, preemph, sample_rate, n_mels, n_fft, hop_len, win_len, f_min
        )
        tgt_mels.append(tgt_mel)

    tgt_mel = np.concatenate(tgt_mels, axis=0)
    tgt_mel = torch.FloatTensor(tgt_mel.T).unsqueeze(0).to(device)
    print("[INFO] target spectrograms shape:", tgt_mel.shape)

    with torch.no_grad():
        src_feat = wav2vec.extract_features(src_wav, None)[0]
        print("[INFO] source Wav2Vec feature shape:", src_feat.shape)

        elaspe_time = datetime.now() - step_moment
        step_moment = datetime.now()
        print("[INFO] elasped time", elaspe_time.total_seconds())

        out_mel, attns = model(src_feat, tgt_mel)
        out_mel = out_mel.transpose(1, 2).squeeze(0)
        print("[INFO] converted spectrogram shape:", out_mel.shape)

        elaspe_time = datetime.now() - step_moment
        step_moment = datetime.now()
        print("[INFO] elasped time", elaspe_time.total_seconds())

        out_wav = vocoder.generate([out_mel])[0]
        out_wav = out_wav.cpu().numpy()
        print("[INFO] generated waveform shape:", out_wav.shape)

        elaspe_time = datetime.now() - step_moment
        step_moment = datetime.now()
        print("[INFO] elasped time", elaspe_time.total_seconds())

    wav_path = Path(output_path)
    sf.write(wav_path, out_wav, sample_rate)
    print("[INFO] generated waveform is saved to", wav_path)

    # Diagnostics: mel-spectrogram and attention plots beside the audio.
    mel_path = wav_path.with_suffix(".mel.png")
    plot_mel(out_mel, filename=mel_path)
    print("[INFO] mel-spectrogram plot is saved to", mel_path)

    attn_path = wav_path.with_suffix(".attn.png")
    plot_attn(attns, filename=attn_path)
    print("[INFO] attention plot is saved to", attn_path)

    elaspe_time = datetime.now() - begin_time
    print("[INFO] Overall elasped time", elaspe_time.total_seconds())
if __name__ == "__main__":
    # Suppress all library warnings for cleaner command-line output.
    warnings.filterwarnings("ignore")
    main(**parse_args())
| 4,829 | 32.776224 | 88 | py |
FragmentVC | FragmentVC-main/train.py | #!/usr/bin/env python3
"""Train FragmentVC model."""
import argparse
import datetime
import random
from pathlib import Path
import torch
import torch.nn as nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from data import IntraSpeakerDataset, collate_batch
from models import FragmentVC, get_cosine_schedule_with_warmup
def parse_args():
    """Parse training options from the command line into a dict."""
    parser = argparse.ArgumentParser()
    parser.add_argument("data_dir", type=str)
    parser.add_argument("--save_dir", type=str, default=".")
    # Integer-valued schedule options, grouped for brevity.
    for flag, default in [
        ("--total_steps", 250000),
        ("--warmup_steps", 500),
        ("--valid_steps", 1000),
        ("--log_steps", 100),
        ("--save_steps", 10000),
    ]:
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument("--milestones", type=int, nargs=2, default=[50000, 150000])
    parser.add_argument("--exclusive_rate", type=float, default=1.0)
    # Integer-valued data/loader options.
    for flag, default in [
        ("--n_samples", 10),
        ("--accu_steps", 2),
        ("--batch_size", 8),
        ("--n_workers", 8),
    ]:
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument("--preload", action="store_true")
    parser.add_argument("--comment", type=str)
    return vars(parser.parse_args())
def model_fn(batch, model, criterion, self_exclude, ref_included, device):
    """Run one batch through *model* and return the mean per-utterance loss."""
    srcs, src_masks, refs, ref_masks, tgts, tgt_masks, overlap_lens = batch

    # Move every tensor in the batch onto the target device.
    srcs, src_masks = srcs.to(device), src_masks.to(device)
    refs, ref_masks = refs.to(device), ref_masks.to(device)
    tgts, tgt_masks = tgts.to(device), tgt_masks.to(device)

    if not ref_included:
        # References disabled: the target itself serves as the reference.
        refs, ref_masks = tgts, tgt_masks
    elif random.random() >= self_exclude:
        # With probability (1 - self_exclude), also let the model attend to
        # the target utterance by appending it to the references.
        refs = torch.cat((refs, tgts), dim=2)
        ref_masks = torch.cat((ref_masks, tgt_masks), dim=1)

    outs, _ = model(srcs, refs, src_masks=src_masks, ref_masks=ref_masks)

    # Loss over only the overlapping (non-padded) frames of each utterance.
    per_utt = [
        criterion(out[:, :length], tgt[:, :length])
        for out, tgt, length in zip(outs.unbind(), tgts.unbind(), overlap_lens)
    ]
    return sum(per_utt) / len(per_utt)
def valid(dataloader, model, criterion, device):
    """Run one full pass over the validation loader and return the mean
    batch loss; switches the model to eval and back to train mode."""
    model.eval()
    running_loss = 0.0
    pbar = tqdm(total=len(dataloader.dataset), ncols=0, desc="Valid", unit=" uttr")

    for i, batch in enumerate(dataloader):
        with torch.no_grad():
            # self_exclude=1.0, ref_included=True: validation always uses the
            # provided references only (target never appended).
            loss = model_fn(batch, model, criterion, 1.0, True, device)
            running_loss += loss.item()

        pbar.update(dataloader.batch_size)
        pbar.set_postfix(loss=f"{running_loss / (i+1):.2f}")

    pbar.close()
    model.train()

    return running_loss / len(dataloader)
def main(
    data_dir,
    save_dir,
    total_steps,
    warmup_steps,
    valid_steps,
    log_steps,
    save_steps,
    milestones,
    exclusive_rate,
    n_samples,
    accu_steps,
    batch_size,
    n_workers,
    preload,
    comment,
):
    """Train a FragmentVC model with a two-phase curriculum.

    Schedule (driven by ``milestones``):
      * before ``milestones[0]``: references are NOT fed to the model;
      * at ``milestones[0]``: references are enabled and the optimizer and
        scheduler are restarted (with a reduced LR for ``model.unet``);
      * between the milestones: the self-exclusive rate ramps linearly from
        0 up to ``exclusive_rate`` and then stays there.

    Validates every ``valid_steps`` steps; every ``save_steps`` steps saves
    both the best-so-far and the current model as TorchScript checkpoints.
    TensorBoard logging is active only when ``comment`` is given.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    metadata_path = Path(data_dir) / "metadata.json"

    dataset = IntraSpeakerDataset(data_dir, metadata_path, n_samples, preload)
    # 90% train / 10% validation split.
    lengths = [trainlen := int(0.9 * len(dataset)), len(dataset) - trainlen]
    trainset, validset = random_split(dataset, lengths)
    train_loader = DataLoader(
        trainset,
        batch_size=batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=n_workers,
        pin_memory=True,
        collate_fn=collate_batch,
    )
    valid_loader = DataLoader(
        validset,
        # Validation does no gradient accumulation, so it can take the full
        # effective batch size in one forward pass.
        batch_size=batch_size * accu_steps,
        num_workers=n_workers,
        drop_last=True,
        pin_memory=True,
        collate_fn=collate_batch,
    )
    train_iterator = iter(train_loader)

    # TensorBoard writer only when a run comment is provided.  Bug fix: the
    # writer used to be constructed unconditionally while log_dir was defined
    # only inside this branch, raising NameError when --comment was omitted.
    # (All later writer uses are already guarded by `comment is not None`.)
    writer = None
    if comment is not None:
        log_dir = "logs/"
        log_dir += datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        log_dir += "_" + comment
        writer = SummaryWriter(log_dir)

    save_dir_path = Path(save_dir)
    save_dir_path.mkdir(parents=True, exist_ok=True)

    model = FragmentVC().to(device)
    # Scripted so checkpoints can be exported via model.save() below.
    model = torch.jit.script(model)

    criterion = nn.L1Loss()
    optimizer = AdamW(model.parameters(), lr=1e-4)
    scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)

    best_loss = float("inf")
    best_state_dict = None

    # Phase-1 settings: no references, no self-exclusion yet.
    self_exclude = 0.0
    ref_included = False

    pbar = tqdm(total=valid_steps, ncols=0, desc="Train", unit=" step")

    for step in range(total_steps):
        batch_loss = 0.0

        # Gradient accumulation: accu_steps micro-batches per optimizer step.
        for _ in range(accu_steps):
            try:
                batch = next(train_iterator)
            except StopIteration:
                # Epoch exhausted — restart the training loader.
                train_iterator = iter(train_loader)
                batch = next(train_iterator)

            loss = model_fn(batch, model, criterion, self_exclude, ref_included, device)
            loss = loss / accu_steps
            batch_loss += loss.item()
            loss.backward()

        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

        pbar.update()
        pbar.set_postfix(loss=f"{batch_loss:.2f}", excl=self_exclude, step=step + 1)

        if step % log_steps == 0 and comment is not None:
            writer.add_scalar("Loss/train", batch_loss, step)
            writer.add_scalar("Self-exclusive Rate", self_exclude, step)

        if (step + 1) % valid_steps == 0:
            pbar.close()

            valid_loss = valid(valid_loader, model, criterion, device)

            if comment is not None:
                writer.add_scalar("Loss/valid", valid_loss, step + 1)

            if valid_loss < best_loss:
                best_loss = valid_loss
                best_state_dict = model.state_dict()

            pbar = tqdm(total=valid_steps, ncols=0, desc="Train", unit=" step")

        if (step + 1) % save_steps == 0 and best_state_dict is not None:
            loss_str = f"{best_loss:.4f}".replace(".", "dot")
            best_ckpt_name = f"retriever-best-loss{loss_str}.pt"

            loss_str = f"{valid_loss:.4f}".replace(".", "dot")
            curr_ckpt_name = f"retriever-step{step+1}-loss{loss_str}.pt"

            # Export both the best and the current model: temporarily load the
            # best weights, save, then restore the current weights.  Done on
            # CPU so the checkpoint is device-independent.
            current_state_dict = model.state_dict()
            model.cpu()
            model.load_state_dict(best_state_dict)
            model.save(str(save_dir_path / best_ckpt_name))
            model.load_state_dict(current_state_dict)
            model.save(str(save_dir_path / curr_ckpt_name))
            model.to(device)
            pbar.write(f"Step {step + 1}, best model saved. (loss={best_loss:.4f})")

        # Curriculum schedule for the self-exclusive rate.
        if (step + 1) >= milestones[1]:
            self_exclude = exclusive_rate
        elif (step + 1) == milestones[0]:
            # Phase 2 begins: enable references and restart optimizer and
            # scheduler, with a much smaller LR for the (pretrained) unet.
            ref_included = True
            optimizer = AdamW(
                [
                    {"params": model.unet.parameters(), "lr": 1e-6},
                    {"params": model.smoothers.parameters()},
                    {"params": model.mel_linear.parameters()},
                    {"params": model.post_net.parameters()},
                ],
                lr=1e-4,
            )
            scheduler = get_cosine_schedule_with_warmup(
                optimizer, warmup_steps, total_steps - milestones[0]
            )
            pbar.write("Optimizer and scheduler restarted.")
        elif (step + 1) > milestones[0]:
            # Linear ramp of the self-exclusive rate between the milestones.
            self_exclude = (step + 1 - milestones[0]) / (milestones[1] - milestones[0])
            self_exclude *= exclusive_rate

    pbar.close()
# Script entry point: parse CLI arguments and launch training.
if __name__ == "__main__":
    main(**parse_args())
| 7,874 | 30.754032 | 88 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.