repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
FBNETGEN | FBNETGEN-main/main.py | from pathlib import Path
import argparse
import yaml
import torch
from model import FBNETGEN, GNNPredictor, SeqenceModel, BrainNetCNN
from train import BasicTrain, BiLevelTrain, SeqTrain, GNNTrain, BrainCNNTrain
from datetime import datetime
from dataloader import init_dataloader
def main(args):
    """Load the YAML config, build the model/trainer pair it specifies, and
    run one complete training process.

    Args:
        args: argparse.Namespace with a ``config_filename`` attribute
            pointing at a YAML settings file.

    Raises:
        ValueError: if ``config['model']['type']`` is not one of
            'seq', 'gnn', 'fbnetgen', 'brainnetcnn'.
    """
    with open(args.config_filename) as f:
        config = yaml.load(f, Loader=yaml.Loader)

    dataloaders, node_size, node_feature_size, timeseries_size = \
        init_dataloader(config['data'])

    # Trainers need the sequence/node dimensions discovered at load time.
    config['train']["seq_len"] = timeseries_size
    config['train']["node_size"] = node_size

    # Select the model class and its matching training loop.
    model_type = config['model']['type']
    if model_type == 'seq':
        model = SeqenceModel(config['model'], node_size, timeseries_size)
        use_train = SeqTrain
    elif model_type == 'gnn':
        model = GNNPredictor(node_feature_size, node_size)
        use_train = GNNTrain
    elif model_type == 'fbnetgen':
        model = FBNETGEN(config['model'], node_size,
                         node_feature_size, timeseries_size)
        use_train = BasicTrain
    elif model_type == 'brainnetcnn':
        model = BrainNetCNN(node_size)
        use_train = BrainCNNTrain
    else:
        # Fail fast: the original fell through and later raised an opaque
        # UnboundLocalError on `model`/`use_train`.
        raise ValueError(f"Unknown model type: {model_type!r}")

    if config['train']['method'] == 'bilevel' and model_type == 'fbnetgen':
        # Bi-level training: one optimizer for the graph-generator parts
        # (feature extractor + embedding-to-graph), one for the predictor.
        parameters = {
            'lr': config['train']['lr'],
            'weight_decay': config['train']['weight_decay'],
            'params': [
                {'params': model.extract.parameters()},
                {'params': model.emb2graph.parameters()}
            ]
        }
        optimizer1 = torch.optim.Adam(**parameters)
        optimizer2 = torch.optim.Adam(model.predictor.parameters(),
                                      lr=config['train']['lr'],
                                      weight_decay=config['train']['weight_decay'])
        opts = (optimizer1, optimizer2)
        use_train = BiLevelTrain
    else:
        # Single optimizer over every model parameter.
        optimizer = torch.optim.Adam(
            model.parameters(), lr=config['train']['lr'],
            weight_decay=config['train']['weight_decay'])
        opts = (optimizer,)

    # Encode the enabled regularizers into the run name for bookkeeping.
    loss_name = 'loss'
    if config['train']["group_loss"]:
        loss_name = f"{loss_name}_group_loss"
    if config['train']["sparsity_loss"]:
        loss_name = f"{loss_name}_sparsity_loss"

    now = datetime.now()
    date_time = now.strftime("%m-%d-%H-%M-%S")

    # Optional model settings; "none" keeps the folder name well-formed for
    # model types that do not define them.
    extractor_type = config['model'].get('extractor_type', "none")
    embedding_size = config['model'].get('embedding_size', "none")
    window_size = config['model'].get('window_size', "none")

    save_folder_name = Path(config['train']['log_folder'])/Path(
        date_time +
        f"_{config['data']['dataset']}_{config['model']['type']}_{config['train']['method']}"
        + f"_{extractor_type}_{loss_name}_{embedding_size}_{window_size}")

    train_process = use_train(
        config['train'], model, opts, dataloaders, save_folder_name)

    train_process.train()
if __name__ == '__main__':
    # Command-line entry point: run the full pipeline `repeat_time` times.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config_filename', default='setting/pnc.yaml', type=str,
                            help='Configuration filename for training the model.')
    arg_parser.add_argument('--repeat_time', default=5, type=int)
    args = arg_parser.parse_args()
    for _ in range(args.repeat_time):
        main(args)
| 3,687 | 35.514851 | 109 | py |
FBNETGEN | FBNETGEN-main/dataloader.py |
import numpy as np
import torch
import torch.utils.data as utils
from sklearn import preprocessing
import pandas as pd
from scipy.io import loadmat
import pathlib
class StandardScaler:
    """Z-score normalisation with a fixed, caller-supplied mean/std pair."""

    def __init__(self, mean, std):
        # Statistics are provided by the caller (e.g. computed over the
        # whole timeseries array) rather than fitted here.
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Map raw values to standard scores."""
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        """Undo `transform`, mapping standard scores back to raw values."""
        return data * self.std + self.mean
def infer_dataloader(dataset_config):
    """Load timeseries and labels for inference (no train/val/test split).

    Returns (final_fc, labels, node_size, timeseries) where final_fc is a
    float tensor of shape (subjects, nodes, timepoints) and labels are
    integer-encoded from the "sex" column of the label CSV.
    """
    label_df = pd.read_csv(dataset_config["label"])

    if dataset_config["dataset"] == "PNC":
        # PNC stores a dict {'data': ..., 'id': ...} in a 0-d object array.
        fc_data = np.load(dataset_config["time_seires"], allow_pickle=True).item()
        # Stored as (subject, time, node); transpose to (subject, node, time).
        fc_timeseires = fc_data['data'].transpose((0, 2, 1))
        fc_id = fc_data['id']
        id2gender = dict(zip(label_df['SUBJID'], label_df['sex']))
        final_fc, final_label = [], []
        # Keep only subjects that have an entry in the label file.
        for fc, l in zip(fc_timeseires, fc_id):
            if l in id2gender:
                final_fc.append(fc)
                final_label.append(id2gender[l])
        final_fc = np.array(final_fc)
    elif dataset_config["dataset"] == 'ABCD':
        fc_data = np.load(dataset_config["time_seires"], allow_pickle=True)
        # NOTE(review): this branch never assigns final_fc/final_label, so
        # the code below raises NameError for ABCD — looks truncated;
        # confirm against the original repository.

    _, node_size, timeseries = final_fc.shape

    # "sex" labels are strings; map them to integer class ids.
    encoder = preprocessing.LabelEncoder()
    encoder.fit(label_df["sex"])
    labels = encoder.transform(final_label)

    final_fc = torch.from_numpy(final_fc).float()

    return final_fc, labels, node_size, timeseries
def init_dataloader(dataset_config):
    """Build train/val/test dataloaders for the configured dataset.

    Supports ABIDE, HIV/BP, PPMI(_balanced), and (in the fallback branch)
    ABCD and PNC.  Every branch produces:
      - final_fc: timeseries array (dummy (N,1,1) ones where unavailable),
      - final_pearson: per-subject connectivity matrices,
      - labels: integer class labels.

    Returns:
        ((train_dl, val_dl, test_dl), node_size, node_feature_size,
        timeseries)
    """
    if dataset_config["dataset"] == 'ABIDE':
        # Preprocessed .npy dict with timeseries, Pearson matrices, labels.
        data = np.load(dataset_config["time_seires"], allow_pickle=True).item()
        final_fc = data["timeseires"]
        final_pearson = data["corr"]
        labels = data["label"]
    elif dataset_config["dataset"] == "HIV" or dataset_config["dataset"] == "BP":
        data = loadmat(dataset_config["node_feature"])
        labels = data['label']
        labels = labels.reshape(labels.shape[0])
        # Labels arrive as {-1, 1}; remap to {0, 1} for CrossEntropyLoss.
        labels[labels==-1] = 0
        view = dataset_config["view"]
        final_pearson = data[view]
        # (node, node, subject) -> (subject, node, node)
        final_pearson = np.array(final_pearson).transpose(2, 0, 1)
        # No timeseries available: use a (subject, 1, 1) placeholder.
        final_fc = np.ones((final_pearson.shape[0],1,1))
    elif dataset_config["dataset"] == 'PPMI' or dataset_config["dataset"] == 'PPMI_balanced':
        m = loadmat(dataset_config["node_feature"])
        labels = m['label'] if dataset_config["dataset"] != 'PPMI_balanced' else m['label_new']
        labels = labels.reshape(labels.shape[0])
        data = m['X'] if dataset_config["dataset"] == 'PPMI' else m['X_new']
        final_pearson = np.zeros((data.shape[0], 84, 84))
        modal_index = 0
        for (index, sample) in enumerate(data):
            # Assign the first view in the three views of PPMI to a1
            final_pearson[index, :, :] = sample[0][:, :, modal_index]
        # No timeseries available: use a (subject, 1, 1) placeholder.
        final_fc = np.ones((final_pearson.shape[0],1,1))
    else:
        # ABCD / PNC: separate timeseries, node-feature and label files.
        fc_data = np.load(dataset_config["time_seires"], allow_pickle=True)
        pearson_data = np.load(dataset_config["node_feature"], allow_pickle=True)
        label_df = pd.read_csv(dataset_config["label"])
        if dataset_config["dataset"] == 'ABCD':
            # Subject ids for the two arrays come from side files, one id
            # per line (trailing newline stripped via line[:-1]).
            with open(dataset_config["node_id"], 'r') as f:
                lines = f.readlines()
            pearson_id = [line[:-1] for line in lines]
            with open(dataset_config["seires_id"], 'r') as f:
                lines = f.readlines()
            fc_id = [line[:-1] for line in lines]
            id2pearson = dict(zip(pearson_id, pearson_data))
            id2gender = dict(zip(label_df['id'], label_df['sex']))
            final_fc, final_label, final_pearson = [], [], []
            # Keep subjects with a label, a Pearson matrix, and no NaNs.
            for fc, l in zip(fc_data, fc_id):
                if l in id2gender and l in id2pearson:
                    if np.any(np.isnan(id2pearson[l])) == False:
                        final_fc.append(fc)
                        final_label.append(id2gender[l])
                        final_pearson.append(id2pearson[l])
            final_pearson = np.array(final_pearson)
            final_fc = np.array(final_fc)
        elif dataset_config["dataset"] == "PNC":
            # Both arrays are 0-d object arrays wrapping {'id', 'data'}.
            pearson_data, fc_data = pearson_data.item(), fc_data.item()
            pearson_id = pearson_data['id']
            pearson_data = pearson_data['data']
            id2pearson = dict(zip(pearson_id, pearson_data))
            fc_id = fc_data['id']
            fc_data = fc_data['data']
            id2gender = dict(zip(label_df['SUBJID'], label_df['sex']))
            final_fc, final_label, final_pearson = [], [], []
            for fc, l in zip(fc_data, fc_id):
                if l in id2gender and l in id2pearson:
                    final_fc.append(fc)
                    final_label.append(id2gender[l])
                    final_pearson.append(id2pearson[l])
            final_pearson = np.array(final_pearson)
            # (subject, time, node) -> (subject, node, time)
            final_fc = np.array(final_fc).transpose(0, 2, 1)

    _, _, timeseries = final_fc.shape
    _, node_size, node_feature_size = final_pearson.shape

    # Standardize the timeseries with global mean/std over all subjects.
    scaler = StandardScaler(mean=np.mean(
        final_fc), std=np.std(final_fc))
    final_fc = scaler.transform(final_fc)

    if dataset_config["dataset"] == 'PNC' or dataset_config["dataset"] == 'ABCD':
        # These branches collected string labels; encode them to ints.
        encoder = preprocessing.LabelEncoder()
        encoder.fit(label_df["sex"])
        labels = encoder.transform(final_label)

    final_fc, final_pearson, labels = [torch.from_numpy(
        data).float() for data in (final_fc, final_pearson, labels)]

    # Random split into train/val/test by the configured fractions.
    length = final_fc.shape[0]
    train_length = int(length*dataset_config["train_set"])
    val_length = int(length*dataset_config["val_set"])

    dataset = utils.TensorDataset(
        final_fc,
        final_pearson,
        labels
    )

    train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
        dataset, [train_length, val_length, length-train_length-val_length])
    train_dataloader = utils.DataLoader(
        train_dataset, batch_size=dataset_config["batch_size"], shuffle=True, drop_last=False)

    val_dataloader = utils.DataLoader(
        val_dataset, batch_size=dataset_config["batch_size"], shuffle=True, drop_last=False)

    test_dataloader = utils.DataLoader(
        test_dataset, batch_size=dataset_config["batch_size"], shuffle=True, drop_last=False)

    return (train_dataloader, val_dataloader, test_dataloader), node_size, node_feature_size, timeseries
| 6,468 | 30.556098 | 104 | py |
FBNETGEN | FBNETGEN-main/train.py | from typing import overload
import torch
from numpy.lib import save
from util import Logger, accuracy, TotalMeter
import numpy as np
from pathlib import Path
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_fscore_support
from util.prepossess import mixup_criterion, mixup_data
from util.loss import mixup_cluster_loss
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BasicTrain:
    """Trainer for the end-to-end FBNETGEN model.

    Runs mixup-augmented cross-entropy training with optional group and
    sparsity regularizers on the learned adjacency matrix, evaluates on the
    validation and test loaders every epoch, and saves metrics, model
    weights, and (optionally) the learned graphs under ``log_folder``.
    """

    def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
        self.logger = Logger()
        self.model = model.to(device)
        self.train_dataloader, self.val_dataloader, self.test_dataloader = dataloaders
        self.epochs = train_config['epochs']
        # Tuple of optimizers; BasicTrain uses optimizers[0] only, while
        # subclasses such as BiLevelTrain use both entries.
        self.optimizers = optimizers
        self.loss_fn = torch.nn.CrossEntropyLoss(reduction='mean')
        # Optional regularizers on the learnable adjacency matrix.
        self.group_loss = train_config['group_loss']
        self.sparsity_loss = train_config['sparsity_loss']
        self.sparsity_loss_weight = train_config['sparsity_loss_weight']
        self.save_path = log_folder
        self.save_learnable_graph = True
        self.init_meters()

    def init_meters(self):
        """Create loss/accuracy/edge meters for train, val and test."""
        self.train_loss, self.val_loss, self.test_loss, self.train_accuracy,\
            self.val_accuracy, self.test_accuracy, self.edges_num = [
                TotalMeter() for _ in range(7)]
        # Spare meters for extra loss terms (not updated in this class).
        self.loss1, self.loss2, self.loss3 = [TotalMeter() for _ in range(3)]

    def reset_meters(self):
        """Zero all meters at the start of an epoch."""
        for meter in [self.train_accuracy, self.val_accuracy, self.test_accuracy,
                      self.train_loss, self.val_loss, self.test_loss, self.edges_num,
                      self.loss1, self.loss2, self.loss3]:
            meter.reset()

    def train_per_epoch(self, optimizer):
        """Run one optimization pass over the training set with mixup."""
        self.model.train()

        for data_in, pearson, label in self.train_dataloader:
            label = label.long()
            data_in, pearson, label = data_in.to(
                device), pearson.to(device), label.to(device)

            # Mixup (alpha=1) blends both timeseries and node features with
            # one random batch permutation; targets_a/b are the original
            # and permuted labels.
            inputs, nodes, targets_a, targets_b, lam = mixup_data(
                data_in, pearson, label, 1, device)

            output, learnable_matrix, edge_variance = self.model(inputs, nodes)

            # NOTE(review): the factor 2 rescales the CE term against the
            # regularizers below — presumably tuned empirically; confirm.
            loss = 2 * mixup_criterion(
                self.loss_fn, output, targets_a, targets_b, lam)

            if self.group_loss:
                loss += mixup_cluster_loss(learnable_matrix,
                                           targets_a, targets_b, lam)

            if self.sparsity_loss:
                # L1 penalty pushes the learned adjacency towards sparsity.
                sparsity_loss = self.sparsity_loss_weight * \
                    torch.norm(learnable_matrix, p=1)
                loss += sparsity_loss

            self.train_loss.update_with_weight(loss.item(), label.shape[0])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Accuracy is measured against the un-mixed labels.
            top1 = accuracy(output, label)[0]
            self.train_accuracy.update_with_weight(top1, label.shape[0])
            self.edges_num.update_with_weight(edge_variance, label.shape[0])

    def test_per_epoch(self, dataloader, loss_meter, acc_meter):
        """Evaluate on `dataloader`.

        Side effects: updates `loss_meter` and `acc_meter`.
        Returns [auc, precision, recall, fscore, support] with micro
        averaging and a 0.5 probability threshold.
        """
        labels = []
        result = []

        self.model.eval()

        for data_in, pearson, label in dataloader:
            label = label.long()
            data_in, pearson, label = data_in.to(
                device), pearson.to(device), label.to(device)
            output, _, _ = self.model(data_in, pearson)

            loss = self.loss_fn(output, label)
            loss_meter.update_with_weight(
                loss.item(), label.shape[0])
            top1 = accuracy(output, label)[0]
            acc_meter.update_with_weight(top1, label.shape[0])
            # Positive-class probability for AUC computation.
            result += F.softmax(output, dim=1)[:, 1].tolist()
            labels += label.tolist()

        auc = roc_auc_score(labels, result)
        # Hard predictions at the 0.5 threshold for the PRF metrics.
        result = np.array(result)
        result[result > 0.5] = 1
        result[result <= 0.5] = 0
        metric = precision_recall_fscore_support(
            labels, result, average='micro')

        return [auc] + list(metric)

    def generate_save_learnable_matrix(self):
        """Dump the learned adjacency matrices of the test set to disk."""
        learable_matrixs = []

        labels = []

        for data_in, nodes, label in self.test_dataloader:
            label = label.long()
            data_in, nodes, label = data_in.to(
                device), nodes.to(device), label.to(device)
            _, learable_matrix, _ = self.model(data_in, nodes)

            learable_matrixs.append(learable_matrix.cpu().detach().numpy())
            labels += label.tolist()

        self.save_path.mkdir(exist_ok=True, parents=True)
        np.save(self.save_path/"learnable_matrix.npy", {'matrix': np.vstack(
            learable_matrixs), "label": np.array(labels)}, allow_pickle=True)

    def save_result(self, results):
        """Persist the per-epoch metric table and the model weights."""
        self.save_path.mkdir(exist_ok=True, parents=True)
        np.save(self.save_path/"training_process.npy",
                results, allow_pickle=True)

        torch.save(self.model.state_dict(), self.save_path/"model.pt")

    def train(self):
        """Full loop: train, evaluate, log each epoch, then save artifacts."""
        training_process = []

        for epoch in range(self.epochs):
            self.reset_meters()
            self.train_per_epoch(self.optimizers[0])
            val_result = self.test_per_epoch(self.val_dataloader,
                                             self.val_loss, self.val_accuracy)

            test_result = self.test_per_epoch(self.test_dataloader,
                                              self.test_loss, self.test_accuracy)

            self.logger.info(" | ".join([
                f'Epoch[{epoch}/{self.epochs}]',
                f'Train Loss:{self.train_loss.avg: .3f}',
                f'Train Accuracy:{self.train_accuracy.avg: .3f}%',
                f'Edges:{self.edges_num.avg: .3f}',
                f'Test Loss:{self.test_loss.avg: .3f}',
                f'Test Accuracy:{self.test_accuracy.avg: .3f}%',
                f'Val AUC:{val_result[0]:.2f}',
                f'Test AUC:{test_result[0]:.2f}'
            ]))

            training_process.append([self.train_accuracy.avg, self.train_loss.avg,
                                     self.val_loss.avg, self.test_loss.avg]
                                    + val_result + test_result)

        if self.save_learnable_graph:
            self.generate_save_learnable_matrix()
        self.save_result(training_process)
class BiLevelTrain(BasicTrain):
    """Alternating (bi-level) variant of BasicTrain.

    Expects two optimizers (built in main.py): ``optimizers[0]`` updates
    the graph-generator parameters, ``optimizers[1]`` the predictor.
    Within every 10-epoch cycle, the first ``matrix_epoch`` epochs train
    the generator and the remaining epochs train the predictor.

    The redundant ``__init__`` override of the original (it only called
    ``super().__init__`` with the same arguments) has been removed;
    BasicTrain's initializer is inherited unchanged.
    """

    def train(self):
        """Full training loop with alternating optimizers."""
        training_process = []
        matrix_epoch = 5  # generator epochs per 10-epoch cycle

        for epoch in range(self.epochs):
            self.reset_meters()

            # Alternate which parameter group is trained this epoch.
            if epoch % 10 < matrix_epoch:
                self.train_per_epoch(self.optimizers[0])
            else:
                self.train_per_epoch(self.optimizers[1])

            val_result = self.test_per_epoch(self.val_dataloader,
                                             self.val_loss, self.val_accuracy)

            test_result = self.test_per_epoch(self.test_dataloader,
                                              self.test_loss, self.test_accuracy)

            self.logger.info(" | ".join([
                f'Epoch[{epoch}/{self.epochs}]',
                f'Train Loss:{self.train_loss.avg: .3f}',
                f'Train Accuracy:{self.train_accuracy.avg: .3f}%',
                f'Edges:{self.edges_num.avg: .3f}',
                f'Test Loss:{self.test_loss.avg: .3f}',
                f'Test Accuracy:{self.test_accuracy.avg: .3f}%',
                f'Val AUC:{val_result[0]:.2f}',
                f'Test AUC:{test_result[0]:.2f}'
            ]))

            training_process.append([self.train_accuracy.avg, self.train_loss.avg,
                                     self.val_loss.avg, self.test_loss.avg]
                                    + val_result + test_result)

        if self.save_learnable_graph:
            self.generate_save_learnable_matrix()
        self.save_result(training_process)
class GNNTrain(BasicTrain):
    """Trainer for the pure-GNN baseline (GNNPredictor).

    The graph fed to the GNN is not learned: depending on
    ``pure_gnn_graph`` it is either an all-ones matrix ("uniform") or the
    absolute Pearson correlation ("pearson").
    """

    def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
        super().__init__(train_config, model, optimizers, dataloaders, log_folder)
        self.pure_gnn_graph = train_config['pure_gnn_graph']
        # No learned graph to dump for this baseline.
        self.save_learnable_graph = False

    def train_per_epoch(self, optimizer):
        """One mixup training pass; the timeseries input is ignored."""
        self.model.train()

        for _, pearson, label in self.train_dataloader:
            label = label.long()
            pearson, label = pearson.to(device), label.to(device)
            bz, module_num, _ = pearson.shape

            # NOTE(review): any other `pure_gnn_graph` value leaves `graph`
            # unbound and raises NameError below — confirm config values.
            if self.pure_gnn_graph == "uniform":
                graph = torch.ones(
                    (bz, module_num, module_num)).float().to(device)
            elif self.pure_gnn_graph == "pearson":
                graph = torch.abs(pearson)

            # Mixup the graph and the node features with one permutation.
            graph, nodes, targets_a, targets_b, lam = mixup_data(
                graph, pearson, label, 1, device)

            output = self.model(graph, nodes)

            loss = mixup_criterion(
                self.loss_fn, output, targets_a, targets_b, lam)

            self.train_loss.update_with_weight(loss.item(), label.shape[0])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            top1 = accuracy(output, label)[0]
            self.train_accuracy.update_with_weight(top1, label.shape[0])

    def test_per_epoch(self, dataloader, loss_meter, acc_meter):
        """Evaluate; returns [auc, precision, recall, fscore, support]."""
        labels = []
        result = []

        self.model.eval()

        for _, pearson, label in dataloader:
            label = label.long()
            pearson, label = pearson.to(device), label.to(device)
            bz, module_num, _ = pearson.shape
            if self.pure_gnn_graph == "uniform":
                graph = torch.ones(
                    (bz, module_num, module_num)).float().to(device)
            elif self.pure_gnn_graph == "pearson":
                graph = torch.abs(pearson)

            output = self.model(graph, pearson)

            loss = self.loss_fn(output, label)
            loss_meter.update_with_weight(
                loss.item(), label.shape[0])
            top1 = accuracy(output, label)[0]
            acc_meter.update_with_weight(top1, label.shape[0])
            # Positive-class probability for AUC.
            result += F.softmax(output, dim=1)[:, 1].tolist()
            labels += label.tolist()

        auc = roc_auc_score(labels, result)
        result = np.array(result)
        result[result > 0.5] = 1
        result[result <= 0.5] = 0
        metric = precision_recall_fscore_support(
            labels, result, average='micro')

        return [auc] + list(metric)
class SeqTrain(BasicTrain):
    """Trainer for the sequence-only baseline (SeqenceModel).

    Only the raw timeseries is used; the Pearson input yielded by the
    dataloader is ignored.
    """

    def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
        super().__init__(train_config, model, optimizers, dataloaders, log_folder)
        # No learned graph to dump for this baseline.
        self.save_learnable_graph = False

    def train_per_epoch(self, optimizer):
        """One mixup training pass over the timeseries."""
        self.model.train()

        for seq_group, _, label in self.train_dataloader:
            label = label.long()
            seq_group, label = seq_group.to(device), label.to(device)

            # mixup_data wants two tensors; the sequences are passed twice
            # and the second (node) return is discarded.
            seq_group, _, targets_a, targets_b, lam = mixup_data(
                seq_group, seq_group, label, 1, device)

            output = self.model(seq_group)

            loss = mixup_criterion(
                self.loss_fn, output, targets_a, targets_b, lam)

            self.train_loss.update_with_weight(loss.item(), label.shape[0])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            top1 = accuracy(output, label)[0]
            self.train_accuracy.update_with_weight(top1, label.shape[0])

    def test_per_epoch(self, dataloader, loss_meter, acc_meter):
        """Evaluate; returns [auc, precision, recall, fscore, support]."""
        labels = []
        result = []

        self.model.eval()

        for seq_group, _, label in dataloader:
            label = label.long()
            seq_group, label = seq_group.to(device), label.to(device)
            output = self.model(seq_group)

            loss = self.loss_fn(output, label)
            loss_meter.update_with_weight(
                loss.item(), label.shape[0])
            top1 = accuracy(output, label)[0]
            acc_meter.update_with_weight(top1, label.shape[0])
            # Positive-class probability for AUC.
            result += F.softmax(output, dim=1)[:, 1].tolist()
            labels += label.tolist()

        auc = roc_auc_score(labels, result)
        result = np.array(result)
        result[result > 0.5] = 1
        result[result <= 0.5] = 0
        metric = precision_recall_fscore_support(
            labels, result, average='micro')

        return [auc] + list(metric)
class BrainCNNTrain(BasicTrain):
    """Trainer for the BrainNetCNN baseline.

    Only the Pearson connectivity matrix is used; the timeseries input
    from the dataloader is ignored.
    """

    def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
        super().__init__(train_config, model, optimizers, dataloaders, log_folder)
        # No learned graph to dump for this baseline.
        self.save_learnable_graph = False

    def train_per_epoch(self, optimizer):
        """One mixup training pass over the connectivity matrices."""
        self.model.train()

        for _, pearson, label in self.train_dataloader:

            label = label.long()

            pearson, label = pearson.to(device), label.to(device)

            # mixup_data wants two tensors; the Pearson matrix is passed
            # twice and the first (input) return is discarded.
            _, nodes, targets_a, targets_b, lam = mixup_data(
                pearson, pearson, label, 1, device)

            output = self.model(nodes)

            loss = mixup_criterion(
                self.loss_fn, output, targets_a, targets_b, lam)

            self.train_loss.update_with_weight(loss.item(), label.shape[0])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            top1 = accuracy(output, label)[0]
            self.train_accuracy.update_with_weight(top1, label.shape[0])

    def test_per_epoch(self, dataloader, loss_meter, acc_meter):
        """Evaluate; returns [auc, precision, recall, fscore, support]."""
        labels = []
        result = []

        self.model.eval()

        for _, pearson, label in dataloader:
            label = label.long()
            pearson, label = pearson.to(device), label.to(device)
            output = self.model(pearson)

            loss = self.loss_fn(output, label)
            loss_meter.update_with_weight(
                loss.item(), label.shape[0])
            top1 = accuracy(output, label)[0]
            acc_meter.update_with_weight(top1, label.shape[0])
            # Positive-class probability for AUC.
            result += F.softmax(output, dim=1)[:, 1].tolist()
            labels += label.tolist()

        auc = roc_auc_score(labels, result)
        result = np.array(result)
        result[result > 0.5] = 1
        result[result <= 0.5] = 0
        metric = precision_recall_fscore_support(
            labels, result, average='micro')

        return [auc] + list(metric)
class FCNetTrain(BasicTrain):
    """Trainer for FCNet edge classification.

    Unlike the other trainers, the dataloaders here yield (sequence,
    label) pairs (no Pearson tensor), no mixup is applied, and only the
    loss is tracked — accuracy meters are never updated, so they log -1.
    """

    def __init__(self, train_config, model, optimizers, dataloaders, log_folder):
        super().__init__(train_config, model, optimizers, dataloaders, log_folder)
        # NOTE(review): cleared in test_per_epoch but never populated —
        # appears vestigial.
        self.generated_graph = []

    def train_per_epoch(self, optimizer):
        """One plain cross-entropy pass over the training set."""
        self.model.train()

        for seq_group, label in self.train_dataloader:
            label = label.long()
            seq_group, label = seq_group.to(device), label.to(device)

            output = self.model(seq_group)

            loss = self.loss_fn(output, label)
            self.train_loss.update_with_weight(loss.item(), label.shape[0])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    def test_per_epoch(self, dataloader, loss_meter, acc_meter, save_graph=False):
        """Evaluate loss only.

        `acc_meter` and `save_graph` are accepted for interface
        compatibility but unused; returns None.
        """
        self.model.eval()
        self.generated_graph = []

        for seq_group, label in dataloader:
            label = label.long()
            seq_group, label = seq_group.to(device), label.to(device)
            output = self.model(seq_group)

            loss = self.loss_fn(output, label)
            loss_meter.update_with_weight(
                loss.item(), label.shape[0])

        return None

    def train(self):
        """Full loop: train, evaluate losses, log, then save artifacts."""
        training_process = []

        for epoch in range(self.epochs):
            self.reset_meters()
            self.train_per_epoch(self.optimizers[0])
            self.test_per_epoch(self.val_dataloader,
                                self.val_loss, self.val_accuracy)

            self.test_per_epoch(self.test_dataloader,
                                self.test_loss, self.test_accuracy, save_graph=True)

            # Accuracy/edge meters are never updated here, so those fields
            # log as -1 (TotalMeter's empty-average sentinel).
            self.logger.info(" | ".join([
                f'Epoch[{epoch}/{self.epochs}]',
                f'Train Loss:{self.train_loss.avg: .3f}',
                f'Train Accuracy:{self.train_accuracy.avg: .3f}%',
                f'Edges:{self.edges_num.avg: .3f}',
                f'Test Loss:{self.test_loss.avg: .3f}',
                f'Test Accuracy:{self.test_accuracy.avg: .3f}%'
            ]))

            training_process.append([self.train_accuracy.avg, self.train_loss.avg,
                                     self.val_loss.avg, self.test_loss.avg])

        self.save_result(training_process)
| 16,952 | 34.690526 | 98 | py |
FBNETGEN | FBNETGEN-main/util/prepossess.py | import torch
import numpy as np
import random
def mixup_data(x, nodes, y, alpha=1.0, device='cuda'):
    """Apply mixup jointly to the inputs `x` and node features `nodes`.

    A single mixing coefficient lam ~ Beta(alpha, alpha) (lam = 1, i.e. no
    mixing, when alpha <= 0) blends every sample with a randomly chosen
    partner from the same batch.  Returns the mixed inputs, the mixed node
    features, the original targets, the permuted targets, and lam.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    permutation = torch.randperm(x.size()[0]).to(device)
    mixed_x = lam * x + (1 - lam) * x[permutation, :]
    mixed_nodes = lam * nodes + (1 - lam) * nodes[permutation, :]
    return mixed_x, mixed_nodes, y, y[permutation], lam
def mixup_data_by_class(x, nodes, y, alpha=1.0, device='cuda'):
    """Mixup restricted to within-class pairs.

    Each label group is mixed independently via `mixup_data`, then the
    groups are concatenated; returns (mixed_x, mixed_nodes, labels).
    """
    xs, node_groups, ys = [], [], []
    for cls in y.unique():
        mask = y == cls
        mixed_x, mixed_nodes, _, _, _ = mixup_data(
            x[mask], nodes[mask], y[mask], alpha=alpha, device=device)
        xs.append(mixed_x)
        node_groups.append(mixed_nodes)
        ys.append(y[mask])
    return torch.cat(xs, dim=0), torch.cat(node_groups, dim=0), torch.cat(ys, dim=0)
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Mixup loss: lam-weighted blend of the criterion w.r.t. both targets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def cal_step_connect(connectity, step):
    """Binary reachability within `step` + 1 hops.

    Raises the adjacency matrix to the (step + 1)-th power and thresholds
    every positive entry to 1.  Note: when step == 0 the thresholding
    happens in place on the input matrix itself.
    """
    reach = connectity
    for _ in range(step):
        reach = np.dot(reach, connectity)
    reach[reach > 0] = 1
    return reach
def obtain_partition(dataloader, fc_threshold, step=2):
    """Build a node-to-edge partition mask from mean functional connectivity.

    The mean Pearson matrix over `dataloader` is binarised at
    `fc_threshold`, expanded to multi-step connectivity, and for every
    connected node pair (i, j) with i > j the two endpoint rows of the
    pair's column in an (n, n*(n-1)/2) incidence-style matrix are set to 1.

    Returns the mask as a CUDA float tensor and the average number of
    column assignments per node.
    """
    pearsons = []
    for data_in, pearson, label in dataloader:
        pearsons.append(pearson)

    # Mean connectivity across the whole loader, then a hard threshold.
    fc_data = torch.mean(torch.cat(pearsons), dim=0)
    fc_data[fc_data > fc_threshold] = 1
    fc_data[fc_data <= fc_threshold] = 0
    _, n = fc_data.shape

    final_partition = torch.zeros((n, (n-1)*n//2))

    # Nodes reachable within `step` + 1 hops count as connected.
    # NOTE(review): cal_step_connect uses np.dot on this torch tensor —
    # works via the array protocol here, but confirm the dtype/type flow.
    connection = cal_step_connect(fc_data, step)
    temp = 0
    for i in range(connection.shape[0]):
        # After `temp += i`, column temp - i + j == i*(i-1)/2 + j: the index
        # of pair (i, j) in the flattened strict lower triangle.
        temp += i
        for j in range(i):
            if connection[i, j] > 0:
                final_partition[i, temp-i+j] = 1
                final_partition[j, temp-i+j] = 1

                # a = random.randint(0, n-1)
                # b = random.randint(0, n-1)
                # final_partition[a, temp-i+j] = 1
                # final_partition[b, temp-i+j] = 1

    connect_num = torch.sum(final_partition > 0)/n

    print(f'Final Partition {connect_num}')

    return final_partition.cuda().float(), connect_num
| 2,388 | 27.783133 | 90 | py |
FBNETGEN | FBNETGEN-main/util/loss.py | import torch
def inner_loss(label, matrixs):
    """Within-class compactness loss.

    Sum over both binary classes of the mean per-entry variance of the
    matrices belonging to that class; classes with fewer than two members
    contribute nothing.
    """
    loss = 0
    for cls in (0, 1):
        members = matrixs[label == cls]
        if members.shape[0] > 1:
            loss = loss + torch.mean(torch.var(members, dim=0))
    return loss
def intra_loss(label, matrixs):
    """Between-class separation loss.

    Returns 1 minus the mean squared difference between the two class-mean
    matrices, or 0 when either class has no members.
    """
    mean_0 = torch.mean(matrixs[label == 0], dim=0) if torch.sum(label == 0) > 0 else None
    mean_1 = torch.mean(matrixs[label == 1], dim=0) if torch.sum(label == 1) > 0 else None
    if mean_0 is None or mean_1 is None:
        return 0
    return 1 - torch.mean(torch.pow(mean_0 - mean_1, 2))
def mixup_cluster_loss(matrixs, y_a, y_b, lam, intra_weight=2):
    """Soft (mixup-weighted) clustering loss on flattened learned matrices.

    Every sample carries a soft class-1 membership ``lam*y_a + (1-lam)*y_b``
    and the complementary class-0 weight.  The loss sums the weighted L1
    spread of samples around each soft class center, plus ``intra_weight``
    times one minus the normalised L1 distance between the two centers
    (pushing the centers apart).
    """
    soft_1 = lam * y_a.float() + (1 - lam) * y_b.float()
    soft_0 = 1 - soft_1

    bz, roi_num, _ = matrixs.shape
    flat = matrixs.reshape((bz, -1))
    weight_1 = torch.sum(soft_1)
    weight_0 = torch.sum(soft_0)
    denom = roi_num * roi_num

    loss = 0.0
    center_0 = center_1 = None
    if weight_0 > 0:
        center_0 = torch.matmul(soft_0, flat) / weight_0
        spread_0 = torch.norm(flat - center_0, p=1, dim=1)
        loss += torch.matmul(soft_0, spread_0) / (weight_0 * denom)
    if weight_1 > 0:
        center_1 = torch.matmul(soft_1, flat) / weight_1
        spread_1 = torch.norm(flat - center_1, p=1, dim=1)
        loss += torch.matmul(soft_1, spread_1) / (weight_1 * denom)
    if weight_0 > 0 and weight_1 > 0:
        # Encourage the two soft centers to be far apart (normalised L1).
        loss += intra_weight * (1 - torch.norm(center_0 - center_1, p=1) / denom)
    return loss
| 1,451 | 24.928571 | 70 | py |
FBNETGEN | FBNETGEN-main/util/logger.py | import logging
class Logger:
    """Thin wrapper around the root `logging` logger.

    Construction resets the root logger: any existing handlers are closed
    and removed, then a single stream handler with a
    timestamp/filename/line/level format is installed at INFO level.
    """

    def __init__(self):
        root = logging.getLogger()
        root.setLevel(logging.INFO)
        # Drop handlers installed by earlier runs so repeated construction
        # does not duplicate every message.
        for old_handler in root.handlers:
            old_handler.close()
        root.handlers.clear()
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            '[%(asctime)s][%(filename)s][L%(lineno)d][%(levelname)s] %(message)s'))
        root.addHandler(handler)
        self.logger = root

    def info(self, info: str):
        """Log `info` at INFO level."""
        self.logger.info(info)
| 579 | 28 | 82 | py |
FBNETGEN | FBNETGEN-main/util/__init__.py | from .logger import Logger
from .meter import AverageMeter, TotalMeter, accuracy
| 81 | 26.333333 | 53 | py |
FBNETGEN | FBNETGEN-main/util/meter.py | from typing import List
import torch
def accuracy(output: torch.Tensor, target: torch.Tensor, top_k=(1,)) -> List[float]:
max_k = max(top_k)
batch_size = target.size(0)
_, predict = output.topk(max_k, 1, True, True)
predict = predict.t()
correct = predict.eq(target.view(1, -1).expand_as(predict))
res = []
for k in top_k:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size).item())
return res
class AverageMeter:
    """Sliding-window average over the most recent `length` values.

    A fixed-size ring buffer holds the recent values; `avg` is their
    running mean and `val` the most recently inserted value.
    """

    def __init__(self, length: int, name: str = None):
        assert length > 0
        self.name = name
        self.count = 0          # number of buffer slots filled (<= length)
        self.sum = 0.0          # sum of the values currently in the window
        self.current: int = -1  # ring-buffer write position
        self.history: List[float] = [None] * length

    @property
    def val(self) -> float:
        """The most recently recorded value."""
        return self.history[self.current]

    @property
    def avg(self) -> float:
        """Mean of the values currently held in the window."""
        return self.sum / self.count

    def update(self, val: float):
        """Insert `val`, evicting the oldest value once the window is full."""
        self.current = (self.current + 1) % len(self.history)
        evicted = self.history[self.current]
        self.sum += val
        if evicted is None:
            self.count += 1
        else:
            self.sum -= evicted
        self.history[self.current] = val
class TotalMeter:
    """Weighted running total with a mean readout.

    `avg` returns -1 as a sentinel while nothing has been recorded.
    """

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val: float):
        """Record one observation with weight 1."""
        self.sum += val
        self.count += 1

    def update_with_weight(self, val: float, count: int):
        """Record an observation representing `count` samples
        (e.g. a batch-mean loss weighted by the batch size)."""
        self.sum += val*count
        self.count += count

    def reset(self):
        """Clear all accumulated state."""
        self.sum = 0
        self.count = 0

    @property
    def avg(self):
        return self.sum / self.count if self.count else -1
| 1,699 | 22.943662 | 84 | py |
FBNETGEN | FBNETGEN-main/util/FCNet/fc_net_label_generation.py | from sklearn.cluster import AffinityPropagation
import numpy as np
import argparse
import random
import pathlib
def main(args):
    """Build a balanced edge-classification training set for FCNet.

    For every subject, nodes are clustered with Affinity Propagation on
    the node timeseries; every same-cluster node pair (i, j), j > i,
    becomes a positive sample (label 1), and for each positive one
    randomly drawn different-cluster partner of node i becomes a negative
    sample (label 0).  The (subject, i, j) index triples and labels are
    saved as 'fcnet_training_data.npy' next to the input file.

    Args:
        args: Namespace with `data_path` (input .npy) and `dataset` name.
    """
    final_fc = np.load(args.data_path, allow_pickle=True)
    if args.dataset == 'PNC':
        # PNC stores a dict {'data': ..., 'id': ...} in a 0-d object array.
        final_fc = final_fc.item()
        final_fc = final_fc['data']
    column_idxs = []
    labels = []
    for p, fc in enumerate(final_fc):
        print(p)
        cluster_result = AffinityPropagation().fit(fc).labels_
        node_size = fc.shape[0]
        for i in range(node_size):
            count = 0
            for j in range(i, node_size):
                if i == j:
                    continue
                if cluster_result[i] == cluster_result[j]:
                    column_idxs.append((np.array((p, i, j))))
                    labels.append(1)
                    count += 1
            # Draw as many negatives as positives for node i.
            while count:
                t = random.randint(0, node_size-1)
                # Bug fix: a negative pair must come from *different*
                # clusters — the original required the SAME cluster here,
                # making label-0 samples indistinguishable from positives.
                if t != i and cluster_result[i] != cluster_result[t]:
                    column_idxs.append((np.array((p, i, t))))
                    labels.append(0)
                    count -= 1
    column_idxs = np.array(column_idxs)
    labels = np.array(labels)
    print(f"Sample size: {labels.shape[0]}, positive: {np.sum(labels)}, negative: {labels.shape[0]-np.sum(labels)}")
    parent_path = pathlib.Path(args.data_path).parent
    np.save(parent_path/'fcnet_training_data.npy', {'index': column_idxs, 'label': labels})
if __name__ == '__main__':
    # CLI entry point; `args` is consumed by the main(args) call below.
    cli = argparse.ArgumentParser()
    cli.add_argument('--data_path', default='/home/root/dataset/ABCD/abcd_rest-timeseires-HCP2016.npy', type=str,
                     help='Configuration filename for training the model.')
    cli.add_argument('--dataset', default='ABCD', type=str,
                     help='Configuration filename for training the model.')
    args = cli.parse_args()
main(args) | 1,893 | 29.548387 | 116 | py |
FBNETGEN | FBNETGEN-main/util/FCNet/infer.py | import torch
import argparse
import yaml
from model import SeqenceModel, FCNet
from dataloader import infer_dataloader
from pathlib import Path
import numpy as np
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
def main(args):
    """Score a trained FCNet/seq model and evaluate an ElasticNet->SVM
    pipeline on the extracted edge features.

    For every subject, the 2-row timeseries of each strict-lower-triangle
    node pair is scored by the model; the per-subject vector of scores is
    the feature set for a downstream ElasticNet feature selector followed
    by an SVM classifier, whose accuracy and AUC are printed.
    """
    with open(args.config_filename) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    # NOTE(review): only the FCNet branch defines `dataset`, `labels`,
    # `xs`, `ys`; any other model type fails with NameError below —
    # confirm intended usage.
    if config['model']['type'] == 'FCNet':
        dataset, labels, node_size, timeseries_size = \
            infer_dataloader(config['data'])
        # Strict lower-triangle indices: each unordered node pair once.
        xs, ys = torch.tril_indices(node_size, node_size, offset=-1)
    config['train']["seq_len"] = timeseries_size
    config['train']["node_size"] = node_size
    if config['model']['type'] == 'seq':
        model = SeqenceModel(config['model'], node_size, timeseries_size)
    elif config['model']['type'] == 'FCNet':
        model = FCNet(node_size, timeseries_size)
    model.load_state_dict(torch.load(Path(args.model_path)/'model.pt'))
    model.cuda()
    model.eval()
    features = []
    interval = 1000  # node pairs scored per forward pass
    for d in dataset:
        outputs = []
        for index in range(0, xs.shape[0], interval):
            data = []
            # Each sample is the 2 x T timeseries of one node pair.
            for x, y in zip(xs[index: index+interval], ys[index: index+interval]):
                data.append(d[[x,y],:])
            data = torch.stack(data, dim=0).cuda()
            output = model(data)
            # Keep only the positive-class score for each pair.
            outputs.append(output[:, 1].detach().cpu().numpy())
        outputs = np.concatenate(outputs)
        features.append(outputs)
    features = np.array(features)
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.33, random_state=42)
    # ElasticNet as a sparse feature selector: keep only features with a
    # non-zero coefficient.
    linearmodel = ElasticNet(alpha=1.0, l1_ratio=0.2, fit_intercept=False).fit(X_train, y_train)
    select_feature = linearmodel.coef_!=0
    print('Used feature number: ', np.sum(select_feature))
    X_train = X_train[:, select_feature]
    X_test = X_test[:, select_feature]
    svm = SVC(gamma='auto', probability=True).fit(X_train, y_train)
    print("acc", svm.score(X_test, y_test))
    prob_result = svm.predict_proba(X_test)
    auc = roc_auc_score(y_test, prob_result[:, 1])
    print("auc", auc)
if __name__ == '__main__':
    # CLI entry point for model inference + downstream evaluation.
    cli = argparse.ArgumentParser()
    cli.add_argument('--model_path', default='result/02-07-15-29-00_PNC_FCNet_normal_none_loss_none_none', type=str,
                     help='The path of the folder containing the model.')
    cli.add_argument('--config_filename', default='setting/pnc.yaml', type=str,
                     help='Configuration filename for training the model.')
    main(cli.parse_args())
| 2,775 | 22.726496 | 119 | py |
FBNETGEN | FBNETGEN-main/util/analysis/extract_info_from_log.py | import argparse
import re
def main(args):
table = []
with open(args.path, 'r') as f:
lines = f.readlines()
for l in lines:
value = re.findall(r'.*Epoch\[(\d+)/500\].*Train Loss: (\d+\.\d+).*Test Loss: (\d+\.\d+)', l)
table.append(value[0])
s = f'|Epoch|'
for i in range(0, 500, 50):
s += f'{i}|'
print(s)
for j, name in enumerate(['Train Loss', "Test Loss"]):
s = f'|{name}|'
for i in range(0, 500, 50):
s += f'{table[i][j+1]}|'
print(s)
if __name__ == '__main__':
    # CLI entry point; `args` is consumed by the main(args) call below.
    cli = argparse.ArgumentParser()
    cli.add_argument('--path', default='ABCD_fbnetgen_cnn_group_loss_false_sparsity_loss_true_window_8_emb_16', type=str,
                     help='Log file path.')
    args = cli.parse_args()
main(args) | 853 | 25.6875 | 124 | py |
FBNETGEN | FBNETGEN-main/util/abide/03-generate_abide_dataset.py | import deepdish as dd
import os.path as osp
import os
import numpy as np
import argparse
from pathlib import Path
import pandas as pd
def main(args):
    """Bundle per-subject ABIDE time series, labels, connectivity matrices
    and acquisition sites into a single ``abide.npy`` file under root_path.
    """
    raw_dir = os.path.join(args.root_path, 'ABIDE_pcp/cpac/filt_noglobal/raw')
    ts_root = os.path.join(args.root_path, 'ABIDE_pcp/cpac/filt_noglobal/')
    pheno = pd.read_csv(
        os.path.join(args.root_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv'),
        header=0)
    # Build a subject-id -> acquisition-site lookup from the phenotype table.
    subject_to_site = pheno[["subject", "SITE_ID"]].set_index("subject").to_dict()['SITE_ID']
    series_list = []
    label_list = []
    partial_list = []
    corr_list = []
    sites = []
    for entry in os.listdir(raw_dir):
        if not osp.isfile(osp.join(raw_dir, entry)):
            continue
        subject_id = entry.split('.')[0]
        site = subject_to_site[int(subject_id)]
        candidates = os.listdir(osp.join(ts_root, subject_id))
        ts_name = [c for c in candidates if c.endswith("1D")][0]
        series = np.loadtxt(osp.join(ts_root, subject_id, ts_name), skiprows=0).T
        if series.shape[1] < 100:  # drop scans shorter than 100 time points
            continue
        payload = dd.io.load(osp.join(raw_dir, entry))
        partial = payload['pcorr'][()]
        partial[partial == float('inf')] = 0
        full_corr = payload['corr'][()]
        full_corr[full_corr == float('inf')] = 0
        series_list.append(series[:, :100])  # truncate to a common length
        label_list.append(payload['label'][0])
        corr_list.append(full_corr)
        partial_list.append(partial)
        sites.append(site)
    # NOTE: key 'timeseires' (sic) is kept for compatibility with downstream loaders.
    np.save(Path(args.root_path)/'ABIDE_pcp/abide.npy',
            {'timeseires': np.array(series_list), "label": np.array(label_list),
             "corr": np.array(corr_list), "pcorr": np.array(partial_list),
             'site': np.array(sites)})
if __name__ == '__main__':
    # CLI entry point for assembling the final dataset file.
    cli = argparse.ArgumentParser(description='Generate the final dataset')
    cli.add_argument('--root_path', default="", type=str, help='The path of the folder containing the dataset folder.')
    main(cli.parse_args())
| 2,040 | 26.958904 | 194 | py |
FBNETGEN | FBNETGEN-main/util/abide/02-process_data.py | # Copyright (c) 2019 Mwiza Kunda
# Modified by Xuan Kan
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import argparse
import pandas as pd
import numpy as np
from preprocess_data import Reader
import deepdish as dd
import warnings
import os
warnings.filterwarnings("ignore")
# Process boolean command line arguments
def str2bool(v):
    """argparse-friendly boolean parser: accepts bools and common yes/no strings."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def main(args):
    """Read precomputed connectivity networks and save one .h5 per subject
    holding its correlation matrix, partial-correlation matrix and label.
    """
    print('Arguments: \n', args)
    data_folder = os.path.join(args.root_path, 'ABIDE_pcp/cpac/filt_noglobal/')
    atlas = args.atlas  # atlas used for node definition
    params = {'seed': args.seed, 'atlas': atlas}
    reader = Reader(args.root_path, args.id_file_path)
    subject_ids = reader.get_ids()
    labels = reader.get_subject_score(subject_ids, score='DX_GROUP')
    n_subjects = len(subject_ids)
    params['n_subjects'] = n_subjects
    # Class labels: 1 is autism, 2 is control.
    one_hot = np.zeros([n_subjects, args.nclass])  # n x 2
    y = np.zeros([n_subjects, 1])                  # n x 1
    for idx, sid in enumerate(subject_ids):
        cls = int(labels[sid])
        one_hot[idx, cls - 1] = 1
        y[idx] = cls
    # Connectivity matrices, shape (n_subjects, rois, rois).
    fea_corr = reader.get_networks(subject_ids, iter_no='', kind='correlation', atlas_name=atlas)
    fea_pcorr = reader.get_networks(subject_ids, iter_no='', kind='partial correlation', atlas_name=atlas)
    raw_folder = os.path.join(data_folder, 'raw')
    if not os.path.exists(raw_folder):
        os.makedirs(raw_folder)
    for idx, sid in enumerate(subject_ids):
        # Labels are shifted to {0, 1} when saved.
        dd.io.save(os.path.join(raw_folder, sid + '.h5'),
                   {'corr': fea_corr[idx], 'pcorr': fea_pcorr[idx], 'label': (y[idx] - 1)})
if __name__ == '__main__':
    # CLI entry point for per-subject connectivity extraction.
    cli = argparse.ArgumentParser(description='Classification of the ABIDE dataset using a Ridge classifier. '
                                              'MIDA is used to minimize the distribution mismatch between ABIDE sites')
    cli.add_argument('--atlas', default='cc200',
                     help='Atlas for network construction (node definition) options: ho, cc200, cc400, default: cc200.')
    cli.add_argument('--seed', default=123, type=int, help='Seed for random initialisation. default: 1234.')
    cli.add_argument('--nclass', default=2, type=int, help='Number of classes. default:2')
    cli.add_argument('--root_path', default="", type=str, help='The path of the folder containing the dataset folder.')
    cli.add_argument('--id_file_path', default="subject_IDs.txt", type=str, help='The path to subject_IDs.txt.')
    main(cli.parse_args())
| 3,874 | 37.366337 | 124 | py |
FBNETGEN | FBNETGEN-main/util/abide/01-fetch_data.py | # Copyright (c) 2019 Mwiza Kunda
# Copyright (C) 2017 Sarah Parisot <s.parisot@imperial.ac.uk>, , Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
This script mainly refers to https://github.com/kundaMwiza/fMRI-site-adaptation/blob/master/fetch_data.py
'''
from nilearn import datasets
import argparse
from preprocess_data import Reader
import os
import shutil
import sys
def str2bool(v):
    """Parse a command-line boolean flag; passes real bools through unchanged."""
    if isinstance(v, bool):
        return v
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    text = v.lower()
    if text in truthy:
        return True
    elif text in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def main(args):
    """Optionally download ABIDE, move each subject's ROI time-series file
    into its own folder, then compute and cache connectivity matrices.
    """
    print(args)
    root_folder = args.root_path
    data_folder = os.path.join(root_folder, 'ABIDE_pcp/cpac/filt_noglobal/')
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)
    # Derivative files to fetch for the chosen atlas.
    derivative = 'rois_' + args.atlas
    files = [derivative]
    filemapping = {'func_preproc': 'func_preproc.nii.gz',
                   derivative: derivative + '.1D'}
    if args.download:
        datasets.fetch_abide_pcp(data_dir=root_folder, pipeline=args.pipeline,
                                 band_pass_filtering=True,
                                 global_signal_regression=False,
                                 derivatives=files,
                                 quality_checked=False)
    reader = Reader(root_folder, args.id_file_path)
    subject_ids = reader.get_ids().tolist()
    # Give every subject its own folder and move its derivative file there.
    for sid, fname in zip(subject_ids, reader.fetch_filenames(subject_ids, derivative, args.atlas)):
        subject_folder = os.path.join(data_folder, sid)
        if not os.path.exists(subject_folder):
            os.mkdir(subject_folder)
        base = fname.split(derivative)[0]  # common filename prefix
        for fl in files:
            if not os.path.exists(os.path.join(subject_folder, base + filemapping[fl])):
                shutil.move(base + filemapping[fl], subject_folder)
    time_series = reader.get_timeseries(subject_ids, args.atlas)
    # Compute and save connectivity matrices.
    reader.subject_connectivity(time_series, subject_ids, args.atlas, 'correlation')
    reader.subject_connectivity(time_series, subject_ids, args.atlas, 'partial correlation')
if __name__ == '__main__':
    # CLI entry point for the download / connectivity-computation step.
    cli = argparse.ArgumentParser(description='Download ABIDE data and compute functional connectivity matrices')
    cli.add_argument('--pipeline', default='cpac', type=str,
                     help='Pipeline to preprocess ABIDE data. Available options are ccs, cpac, dparsf and niak.'
                          ' default: cpac.')
    cli.add_argument('--atlas', default='cc200',
                     help='Brain parcellation atlas. Options: ho, cc200 and cc400, default: cc200.')
    cli.add_argument('--download', default=True, type=str2bool,
                     help='Dowload data or just compute functional connectivity. default: True')
    cli.add_argument('--root_path', default="/home/root/", type=str, help='The path of the folder containing the dataset folder.')
    cli.add_argument('--id_file_path', default="subject_IDs.txt", type=str, help='The path to subject_IDs.txt.')
    main(cli.parse_args())
FBNETGEN | FBNETGEN-main/util/abide/preprocess_data.py | # Copyright (c) 2019 Mwiza Kunda
# Copyright (C) 2017 Sarah Parisot <s.parisot@imperial.ac.uk>, Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implcd ied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import warnings
import glob
import csv
import numpy as np
import scipy.io as sio
from nilearn import connectome
import pandas as pd
from scipy.spatial import distance
from scipy import signal
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
warnings.filterwarnings("ignore")
# Input data variables
class Reader:
    """Utilities for locating, loading and preprocessing ABIDE subject data
    produced by nilearn's ``fetch_abide_pcp`` downloader (CPAC pipeline).

    Fix: ``subject_connectivity`` previously referenced an undefined name
    ``validation_ext`` on the TE/TPE save path, raising NameError; it is now
    an explicit keyword parameter with a backward-compatible default.
    """

    def __init__(self, root_path, id_file_path=None) -> None:
        # Folder layout follows the nilearn ABIDE download structure.
        root_folder = root_path
        self.data_folder = os.path.join(root_folder, 'ABIDE_pcp/cpac/filt_noglobal')
        self.phenotype = os.path.join(root_folder, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')
        self.id_file = id_file_path

    def fetch_filenames(self, subject_IDs, file_type, atlas):
        """
        subject_IDs : list of short subject IDs in string format
        file_type   : must be one of the available file types
        returns:
            filenames : list of matching file names (same length as subject_IDs);
                        'N/A' for subjects with no matching file

        NOTE: this method changes the process working directory (os.chdir).
        """
        filemapping = {'func_preproc': '_func_preproc.nii.gz',
                       'rois_' + atlas: '_rois_' + atlas + '.1D'}
        # The list to be filled
        filenames = []
        # Fill list with requested file paths
        for i in range(len(subject_IDs)):
            os.chdir(self.data_folder)
            find_files = glob.glob('*' + subject_IDs[i] + filemapping[file_type])
            if len(find_files) > 0:
                filenames.append(find_files[0])
            else:
                # Fall back to a per-subject sub-folder when present.
                if os.path.isdir(self.data_folder + '/' + subject_IDs[i]):
                    os.chdir(self.data_folder + '/' + subject_IDs[i])
                    filenames.append(glob.glob('*' + subject_IDs[i] + filemapping[file_type])[0])
                else:
                    filenames.append('N/A')
        return filenames

    # Get timeseries arrays for list of subjects
    def get_timeseries(self, subject_list, atlas_name, silence=False):
        """
        subject_list : list of short subject IDs in string format
        atlas_name   : the atlas based on which the timeseries are generated e.g. aal, cc200
        returns:
            time_series : list of timeseries arrays, each of shape (timepoints x regions)
        """
        timeseries = []
        for i in range(len(subject_list)):
            subject_folder = os.path.join(self.data_folder, subject_list[i])
            ro_file = [f for f in os.listdir(subject_folder) if f.endswith('_rois_' + atlas_name + '.1D')]
            fl = os.path.join(subject_folder, ro_file[0])
            if silence != True:
                print("Reading timeseries file %s" % fl)
            timeseries.append(np.loadtxt(fl, skiprows=0))
        return timeseries

    # compute connectivity matrices
    def subject_connectivity(self, timeseries, subjects, atlas_name, kind, iter_no='', seed=1234,
                             n_subjects='', save=True, save_path=None, validation_ext=''):
        """
        timeseries : timeseries table for subject (timepoints x regions)
        subjects   : subject IDs
        atlas_name : name of the parcellation atlas used
        kind       : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
        iter_no    : tangent connectivity iteration number for cross validation evaluation
        validation_ext : tag embedded in TE/TPE matrix filenames (was an
                         undefined global before this fix; default '' keeps
                         the original filename structure)
        save       : save the connectivity matrix to a file
        save_path  : specify path to save the matrix if different from subject folder
        returns:
            connectivity : connectivity matrix (regions x regions)
        """
        if kind in ['TPE', 'TE', 'correlation', 'partial correlation']:
            if kind not in ['TPE', 'TE']:
                conn_measure = connectome.ConnectivityMeasure(kind=kind)
                connectivity = conn_measure.fit_transform(timeseries)
            else:
                # Tangent-space kinds need a fitted group model first.
                if kind == 'TPE':
                    conn_measure = connectome.ConnectivityMeasure(kind='correlation')
                    conn_mat = conn_measure.fit_transform(timeseries)
                    conn_measure = connectome.ConnectivityMeasure(kind='tangent')
                    connectivity_fit = conn_measure.fit(conn_mat)
                    connectivity = connectivity_fit.transform(conn_mat)
                else:
                    conn_measure = connectome.ConnectivityMeasure(kind='tangent')
                    connectivity_fit = conn_measure.fit(timeseries)
                    connectivity = connectivity_fit.transform(timeseries)

        if save:
            if not save_path:
                save_path = self.data_folder
            if kind not in ['TPE', 'TE']:
                for i, subj_id in enumerate(subjects):
                    subject_file = os.path.join(save_path, subj_id,
                                                subj_id + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
                    sio.savemat(subject_file, {'connectivity': connectivity[i]})
                return connectivity
            else:
                for i, subj_id in enumerate(subjects):
                    subject_file = os.path.join(save_path, subj_id,
                                                subj_id + '_' + atlas_name + '_' + kind.replace(' ', '_') + '_' + str(
                                                    iter_no) + '_' + str(seed) + '_' + validation_ext + str(
                                                    n_subjects) + '.mat')
                    sio.savemat(subject_file, {'connectivity': connectivity[i]})
                return connectivity_fit

    # Get the list of subject IDs
    def get_ids(self, num_subjects=None):
        """
        return:
            subject_IDs : list of all subject IDs (optionally truncated to num_subjects)
        """
        subject_IDs = np.genfromtxt(self.id_file, dtype=str)
        if num_subjects is not None:
            subject_IDs = subject_IDs[:num_subjects]
        return subject_IDs

    # Get phenotype values for a list of subjects
    def get_subject_score(self, subject_list, score):
        """Return {subject_id: value} for a phenotype column, with the
        dataset's missing-value conventions normalized (handedness defaults
        to 'R'; missing IQ scores default to 100)."""
        scores_dict = {}
        with open(self.phenotype) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                if row['SUB_ID'] in subject_list:
                    if score == 'HANDEDNESS_CATEGORY':
                        if (row[score].strip() == '-9999') or (row[score].strip() == ''):
                            scores_dict[row['SUB_ID']] = 'R'
                        elif row[score] == 'Mixed':
                            scores_dict[row['SUB_ID']] = 'Ambi'
                        elif row[score] == 'L->R':
                            scores_dict[row['SUB_ID']] = 'Ambi'
                        else:
                            scores_dict[row['SUB_ID']] = row[score]
                    elif (score == 'FIQ' or score == 'PIQ' or score == 'VIQ'):
                        if (row[score].strip() == '-9999') or (row[score].strip() == ''):
                            scores_dict[row['SUB_ID']] = 100
                        else:
                            scores_dict[row['SUB_ID']] = float(row[score])
                    else:
                        scores_dict[row['SUB_ID']] = row[score]
        return scores_dict

    # preprocess phenotypes. Categorical -> ordinal representation
    @staticmethod
    def preprocess_phenotypes(pheno_ft, params):
        """Ordinal-encode the leading categorical phenotype columns
        (columns 0-2 for MIDA, 0-3 otherwise)."""
        if params['model'] == 'MIDA':
            ct = ColumnTransformer([("ordinal", OrdinalEncoder(), [0, 1, 2])], remainder='passthrough')
        else:
            ct = ColumnTransformer([("ordinal", OrdinalEncoder(), [0, 1, 2, 3])], remainder='passthrough')
        pheno_ft = ct.fit_transform(pheno_ft)
        pheno_ft = pheno_ft.astype('float32')
        return (pheno_ft)

    # create phenotype feature vector to concatenate with fmri feature vectors
    @staticmethod
    def phenotype_ft_vector(pheno_ft, num_subjects, params):
        """One-hot encode gender/eye/hand and append age and FIQ columns."""
        gender = pheno_ft[:, 0]
        if params['model'] == 'MIDA':
            # NOTE(review): column 0 is reused for both gender and eye status
            # in the MIDA branch — verify against the MIDA column layout.
            eye = pheno_ft[:, 0]
            hand = pheno_ft[:, 2]
            age = pheno_ft[:, 3]
            fiq = pheno_ft[:, 4]
        else:
            eye = pheno_ft[:, 2]
            hand = pheno_ft[:, 3]
            age = pheno_ft[:, 4]
            fiq = pheno_ft[:, 5]

        phenotype_ft = np.zeros((num_subjects, 4))
        phenotype_ft_eye = np.zeros((num_subjects, 2))
        phenotype_ft_hand = np.zeros((num_subjects, 3))

        for i in range(num_subjects):
            phenotype_ft[i, int(gender[i])] = 1       # gender one-hot (cols 0-1)
            phenotype_ft[i, -2] = age[i]
            phenotype_ft[i, -1] = fiq[i]
            phenotype_ft_eye[i, int(eye[i])] = 1
            phenotype_ft_hand[i, int(hand[i])] = 1

        if params['model'] == 'MIDA':
            phenotype_ft = np.concatenate([phenotype_ft, phenotype_ft_hand], axis=1)
        else:
            phenotype_ft = np.concatenate([phenotype_ft, phenotype_ft_hand, phenotype_ft_eye], axis=1)
        return phenotype_ft

    # Load precomputed fMRI connectivity networks
    def get_networks(self, subject_list, kind, iter_no='', seed=1234, n_subjects='', atlas_name="aal",
                     variable='connectivity'):
        """
        subject_list : list of subject IDs
        kind         : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
        atlas_name   : name of the parcellation atlas used
        variable     : variable name in the .mat file that has been used to save the precomputed networks
        return:
            matrix : feature matrix of connectivity networks (num_subjects x network_size)
        """
        all_networks = []
        for subject in subject_list:
            if len(kind.split()) == 2:
                kind = '_'.join(kind.split())
            fl = os.path.join(self.data_folder, subject,
                              subject + "_" + atlas_name + "_" + kind.replace(' ', '_') + ".mat")
            matrix = sio.loadmat(fl)[variable]
            all_networks.append(matrix)
        # Fisher z-transform correlations; tangent-space kinds are used as-is.
        if kind in ['TE', 'TPE']:
            norm_networks = [mat for mat in all_networks]
        else:
            norm_networks = [np.arctanh(mat) for mat in all_networks]
        networks = np.stack(norm_networks)
        return networks
| 11,247 | 40.201465 | 118 | py |
FBNETGEN | FBNETGEN-main/model/GSL.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from model.cell import DCGRUCell
import numpy as np
from .model import GNNPredictor, ConvKRegion, Embed2GraphByLinear, GruKRegion, Embed2GraphByProduct
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def cosine_similarity_torch(x1, x2=None, eps=1e-8):
    """Pairwise cosine similarity between the rows of x1 and x2 (x2 defaults to x1).

    eps clamps the norm product to avoid division by zero for all-zero rows.
    """
    if x2 is None:
        x2 = x1
    n1 = x1.norm(p=2, dim=1, keepdim=True)
    n2 = n1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
    denom = (n1 * n2.t()).clamp(min=eps)
    return torch.mm(x1, x2.t()) / denom
def sample_gumbel(shape, eps=1e-20):
    """Draw i.i.d. Gumbel(0, 1) noise of the given shape on `device`."""
    uniform = torch.rand(shape).to(device)
    # Inverse-CDF transform of uniform noise; eps guards log(0).
    gumbel = -torch.log(-torch.log(uniform + eps) + eps)
    return torch.autograd.Variable(gumbel)
def gumbel_softmax_sample(logits, temperature, eps=1e-10):
    """Draw one differentiable (soft) sample from the Gumbel-Softmax."""
    noisy_logits = logits + sample_gumbel(logits.size(), eps=eps)
    return F.softmax(noisy_logits / temperature, dim=-1)
def gumbel_softmax(logits, temperature, hard=False, eps=1e-10):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.

    Args:
        logits: [batch_size, n_class] unnormalized log-probs
        temperature: non-negative scalar controlling sample sharpness
        hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
        [batch_size, n_class] sample. One-hot when hard=True, otherwise a
        probability distribution summing to 1 across classes.
    """
    soft_sample = gumbel_softmax_sample(logits, temperature=temperature, eps=eps)
    if not hard:
        return soft_sample
    # Straight-through estimator: forward pass uses the one-hot argmax,
    # backward pass flows through the soft sample.
    dims = logits.size()
    _, argmax_idx = soft_sample.data.max(-1)
    one_hot = torch.zeros(*dims).to(device)
    one_hot = one_hot.zero_().scatter_(-1, argmax_idx.view(dims[:-1] + (1,)), 1.0)
    return torch.autograd.Variable(one_hot - soft_sample.data) + soft_sample
class GCNPredictor(nn.Module):
    """Three-layer GCN over a (batched) adjacency matrix, followed by
    sum-pooling over nodes and a 2-way classification head.

    Fix: the original ``forward`` discarded every intermediate result — each
    projection and each ``torch.bmm`` was applied to the raw ``node_feature``
    and immediately overwritten, so neither the adjacency nor the first two
    layers influenced the output. The layers are now chained properly.

    Args:
        node_input_dim: dimensionality of each node's input feature vector.
        roi_num: number of graph nodes (brain ROIs); also the hidden width.
    """

    def __init__(self, node_input_dim, roi_num=360):
        super().__init__()
        inner_dim = roi_num
        self.roi_num = roi_num
        self.project1 = nn.Sequential(
            nn.Linear(node_input_dim, inner_dim),
            nn.BatchNorm1d(roi_num),
            nn.Dropout(p=0.4),
            nn.LeakyReLU(negative_slope=0.33)
        )
        self.project2 = nn.Sequential(
            nn.Linear(inner_dim, inner_dim),
            nn.BatchNorm1d(roi_num),
            nn.Dropout(p=0.4),
            nn.LeakyReLU(negative_slope=0.33)
        )
        self.project3 = nn.Sequential(
            nn.Linear(inner_dim, inner_dim),
            nn.BatchNorm1d(roi_num),
            nn.Dropout(p=0.4),
            nn.LeakyReLU(negative_slope=0.33)
        )
        self.fcn = nn.Sequential(
            nn.Linear(inner_dim, 32),
            nn.LeakyReLU(negative_slope=0.33),
            nn.Linear(32, 2)
        )

    def normalize(self, m):
        """Symmetrically normalize a batched adjacency: m_ij / sqrt(d_i * d_j),
        with zero-degree entries mapped to 0 instead of inf."""
        left = torch.sum(m, dim=2, keepdim=True)    # row degrees
        right = torch.sum(m, dim=1, keepdim=True)   # column degrees
        normalize = 1.0/torch.sqrt(torch.bmm(left, right))
        normalize[torch.isinf(normalize)] = 0
        return torch.mul(m, normalize)

    def forward(self, m, node_feature):
        """m: (batch, roi_num, roi_num) adjacency; node_feature:
        (batch, roi_num, node_input_dim). Returns (batch, 2) logits."""
        m = self.normalize(m)
        # Layer 1-2: project node features, then diffuse along the graph.
        x = self.project1(node_feature)
        x = torch.bmm(m, x)
        x = self.project2(x)
        x = torch.bmm(m, x)
        x = self.project3(x)
        x = torch.sum(x, dim=1)  # sum-pool over nodes
        return self.fcn(x)
class Seq2SeqAttrs:
    """Shared hyper-parameters mixed into the DCGRU encoder/decoder stacks."""

    def __init__(self, num_nodes=360):
        self.num_nodes = num_nodes
        self.rnn_units = 1            # hidden units per node
        self.num_rnn_layers = 3
        self.max_diffusion_step = 2
        self.cl_decay_steps = 1000
        self.filter_type = 'laplacian'
        # Flattened hidden size: one rnn_units-wide slot per node.
        self.hidden_state_size = self.num_nodes * self.rnn_units
class EncoderModel(nn.Module, Seq2SeqAttrs):
    """Stacked DCGRU encoder; each call consumes one time step."""

    def __init__(self, seq_len, input_dim=1, num_nodes=360):
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, num_nodes=num_nodes)
        self.input_dim = input_dim
        self.seq_len = seq_len  # for the encoder
        cells = [DCGRUCell(self.rnn_units, self.max_diffusion_step, self.num_nodes,
                           filter_type=self.filter_type)
                 for _ in range(self.num_rnn_layers)]
        self.dcgru_layers = nn.ModuleList(cells)

    def forward(self, inputs, adj, hidden_state=None):
        """
        Encoder forward pass for a single step.
        :param inputs: shape (batch_size, self.num_nodes * self.input_dim)
        :param adj: graph used by the diffusion-convolution cells
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size),
                             zeros when not provided
        :return: (output, hidden_state) where output has shape
                 (batch_size, self.hidden_state_size) and hidden_state stacks
                 the per-layer states (lower indices mean lower layers).
        """
        batch_size, _ = inputs.size()
        if hidden_state is None:
            hidden_state = torch.zeros(
                (self.num_rnn_layers, batch_size, self.hidden_state_size),
                device=device)
        new_states = []
        layer_input = inputs
        # Feed each layer's output into the next; runs in O(num_layers).
        for idx, cell in enumerate(self.dcgru_layers):
            layer_input = cell(layer_input, hidden_state[idx], adj)
            new_states.append(layer_input)
        return layer_input, torch.stack(new_states)
class DecoderModel(nn.Module, Seq2SeqAttrs):
    """Stacked DCGRU decoder with a linear readout; one step per call."""

    def __init__(self, horizn=32, num_nodes=360):
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, num_nodes=num_nodes)
        self.output_dim = 1
        self.horizn = horizn  # for the decoder
        self.projection_layer = nn.Linear(self.rnn_units, self.output_dim)
        cells = [DCGRUCell(self.rnn_units, self.max_diffusion_step, self.num_nodes,
                           filter_type=self.filter_type)
                 for _ in range(self.num_rnn_layers)]
        self.dcgru_layers = nn.ModuleList(cells)

    def forward(self, inputs, adj, hidden_state=None):
        """
        Decoder forward pass for a single step.
        :param inputs: shape (batch_size, self.num_nodes * self.output_dim)
        :param adj: graph used by the diffusion-convolution cells
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :return: (output, hidden_state) where output has shape
                 (batch_size, self.num_nodes * self.output_dim) and
                 hidden_state stacks per-layer states (lower index = lower layer).
        """
        new_states = []
        layer_input = inputs
        for idx, cell in enumerate(self.dcgru_layers):
            layer_input = cell(layer_input, hidden_state[idx], adj)
            new_states.append(layer_input)
        # Project each node's hidden vector down to output_dim values.
        flat = self.projection_layer(layer_input.view(-1, self.rnn_units))
        output = flat.view(-1, self.num_nodes * self.output_dim)
        return output, torch.stack(new_states)
class TSConstruction(nn.Module, Seq2SeqAttrs):
    """Time-series reconstruction model: learns a graph from the full series
    (GRU extractor + graph generator, optionally discretized via
    Gumbel-Softmax), then runs a DCGRU encoder/decoder over that graph to
    reconstruct the input sequence."""
    def __init__(self, feature_dim=8, seq_len=64, node_num=360, discrete=True):
        super().__init__()
        Seq2SeqAttrs.__init__(self, num_nodes=node_num)
        self.seq_len = seq_len
        self.horizn_len = seq_len
        # Decoder horizon equals the input length: full-sequence reconstruction.
        self.encoder_model = EncoderModel(seq_len, num_nodes=self.num_nodes)
        self.decoder_model = DecoderModel(seq_len, num_nodes=self.num_nodes)
        self.discrete = discrete
        # Per-node embedding extractor over the raw series.
        self.extactor = GruKRegion(out_size=feature_dim)
        # self.graph_generator = Embed2GraphByLinear(
        #     input_dim=feature_dim, roi_num=self.num_nodes)
        self.graph_generator = Embed2GraphByProduct(
            input_dim=feature_dim, roi_num=self.num_nodes)
    def encoder(self, inputs, adj):
        """
        Encoder forward pass
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :return: encoder_hidden_state: ( batch_size, self.hidden_state_size)
        """
        encoder_hidden_state = None
        # Feed the sequence step by step, threading the hidden state through.
        for t in range(self.encoder_model.seq_len):
            last_hidden_state, encoder_hidden_state = self.encoder_model(
                inputs[t], adj, encoder_hidden_state)
        return encoder_hidden_state
    def decoder(self, encoder_hidden_state, adj):
        """
        Decoder forward pass
        :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference]
        :param batches_seen: global step [optional, not exist for inference]
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        # Zero "GO" symbol starts autoregressive decoding.
        go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
                                device=device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol
        outputs = []
        # Each step's output is fed back as the next step's input.
        for t in range(self.decoder_model.horizn):
            decoder_output, decoder_hidden_state = self.decoder_model(decoder_input, adj,
                                                                      decoder_hidden_state)
            decoder_input = decoder_output
            outputs.append(decoder_output)
        outputs = torch.stack(outputs)
        return outputs
    def calculate_random_walk_matrix(self, adj_mx):
        # Row-normalize (A + I): D^{-1} (A + I), mapping zero-degree rows to 0.
        # tf.Print(adj_mx, [adj_mx], message="This is adj: ")
        adj_mx = adj_mx + torch.eye(int(adj_mx.shape[1])).to(device)
        d = torch.sum(adj_mx, 2)
        d_inv = 1. / d
        d_inv = torch.where(torch.isinf(d_inv), torch.zeros(
            d_inv.shape).to(device), d_inv)
        d_mat_inv = torch.diag_embed(d_inv)
        random_walk_mx = torch.bmm(d_mat_inv, adj_mx)
        return random_walk_mx
    def forward(self, full_seq, reconstruct_seq, node_feas, temperature):
        """
        :param inputs: shape (batch_size, num_sensor, seq_len)
        :param batches_seen: batches seen till now
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        extracted_feature = self.extactor(full_seq)
        # if torch.any(torch.isnan(extracted_feature)):
        #     print('has nan1')
        # extracted_feature = F.softmax(extracted_feature, dim=-1)
        # if torch.any(torch.isnan(extracted_feature)):
        #     print('has nan2')
        adj = self.graph_generator(extracted_feature)
        # Either hard-sample edges (straight-through Gumbel) or keep them soft.
        if self.discrete:
            adj = gumbel_softmax(
                adj[:, :, :, 0], temperature=temperature, hard=True)
        else:
            adj = adj[:, :, :, 0]
        # Zero out the diagonal (no self-loops in the learned graph).
        # mask = torch.eye(self.num_nodes, self.num_nodes).to(device).byte()
        mask = torch.eye(self.num_nodes, self.num_nodes).bool().to(device)
        adj = torch.where(mask, torch.zeros(
            mask.shape).to(device), adj)
        # NOTE(review): the normalized matrix is computed and then immediately
        # overwritten with the raw adjacency, so the random-walk normalization
        # has no effect — confirm whether this override is intentional.
        random_walk_matrix = self.calculate_random_walk_matrix(adj)
        random_walk_matrix = adj
        # (batch, nodes, time) -> (time, batch, nodes) for stepwise encoding.
        reconstruct_seq = reconstruct_seq.permute(2,0,1)
        encoder_hidden_state = self.encoder(reconstruct_seq, random_walk_matrix)
        outputs = self.decoder(encoder_hidden_state, random_walk_matrix)
        # Back to (batch, nodes, time).
        outputs = outputs.permute(1,2,0)
        return outputs
class BrainGSLModel(nn.Module, Seq2SeqAttrs):
    """Graph-structure-learning model: like TSConstruction it learns a graph
    and reconstructs the sequence with a DCGRU encoder/decoder, but it also
    classifies the learned graph with a GCN predictor, returning
    (reconstruction, prediction, adjacency)."""
    def __init__(self, feature_dim=8, seq_len=64, node_num=360, discrete=True):
        super().__init__()
        Seq2SeqAttrs.__init__(self, num_nodes=node_num)
        self.seq_len = seq_len
        self.horizn_len = seq_len
        # Decoder horizon equals the input length: full-sequence reconstruction.
        self.encoder_model = EncoderModel(seq_len, num_nodes=self.num_nodes)
        self.decoder_model = DecoderModel(seq_len, num_nodes=self.num_nodes)
        self.discrete = discrete
        # Per-node embedding extractor over the raw series.
        self.extactor = GruKRegion(out_size=feature_dim)
        # self.graph_generator = Embed2GraphByLinear(
        #     input_dim=feature_dim, roi_num=self.num_nodes)
        self.graph_generator = Embed2GraphByProduct(
            input_dim=feature_dim, roi_num=self.num_nodes)
        # Classification head over the learned adjacency + node features.
        self.predictor = GCNPredictor(
            node_input_dim=self.num_nodes, roi_num=self.num_nodes)
    def encoder(self, inputs, adj):
        """
        Encoder forward pass
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :return: encoder_hidden_state: ( batch_size, self.hidden_state_size)
        """
        encoder_hidden_state = None
        # Feed the sequence step by step, threading the hidden state through.
        for t in range(self.encoder_model.seq_len):
            last_hidden_state, encoder_hidden_state = self.encoder_model(
                inputs[t], adj, encoder_hidden_state)
        return encoder_hidden_state
    def decoder(self, encoder_hidden_state, adj):
        """
        Decoder forward pass
        :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference]
        :param batches_seen: global step [optional, not exist for inference]
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        # Zero "GO" symbol starts autoregressive decoding.
        go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
                                device=device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol
        outputs = []
        # Each step's output is fed back as the next step's input.
        for t in range(self.decoder_model.horizn):
            decoder_output, decoder_hidden_state = self.decoder_model(decoder_input, adj,
                                                                      decoder_hidden_state)
            decoder_input = decoder_output
            outputs.append(decoder_output)
        outputs = torch.stack(outputs)
        return outputs
    def calculate_random_walk_matrix(self, adj_mx):
        # Row-normalize (A + I): D^{-1} (A + I), mapping zero-degree rows to 0.
        # tf.Print(adj_mx, [adj_mx], message="This is adj: ")
        adj_mx = adj_mx + torch.eye(int(adj_mx.shape[1])).to(device)
        d = torch.sum(adj_mx, 2)
        d_inv = 1. / d
        d_inv = torch.where(torch.isinf(d_inv), torch.zeros(
            d_inv.shape).to(device), d_inv)
        d_mat_inv = torch.diag_embed(d_inv)
        random_walk_mx = torch.bmm(d_mat_inv, adj_mx)
        return random_walk_mx
    def forward(self, full_seq, reconstruct_seq, node_feas, temperature):
        """
        :param inputs: shape (batch_size, num_sensor, seq_len)
        :param batches_seen: batches seen till now
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        extracted_feature = self.extactor(full_seq)
        # if torch.any(torch.isnan(extracted_feature)):
        #     print('has nan1')
        # extracted_feature = F.softmax(extracted_feature, dim=-1)
        # if torch.any(torch.isnan(extracted_feature)):
        #     print('has nan2')
        adj = self.graph_generator(extracted_feature)
        # Either hard-sample edges (straight-through Gumbel) or keep them soft.
        if self.discrete:
            adj = gumbel_softmax(
                adj[:, :, :, 0], temperature=temperature, hard=True)
        else:
            adj = adj[:, :, :, 0]
        # Zero out the diagonal (no self-loops) before reconstruction.
        # mask = torch.eye(self.num_nodes, self.num_nodes).to(device).byte()
        mask = torch.eye(self.num_nodes, self.num_nodes).bool().to(device)
        adj = torch.where(mask, torch.zeros(
            mask.shape).to(device), adj)
        # NOTE(review): the normalized matrix is computed and then immediately
        # overwritten with the raw adjacency, so the random-walk normalization
        # has no effect — confirm whether this override is intentional.
        random_walk_matrix = self.calculate_random_walk_matrix(adj)
        random_walk_matrix = adj
        # (batch, nodes, time) -> (time, batch, nodes) for stepwise encoding.
        reconstruct_seq = reconstruct_seq.permute(2,0,1)
        encoder_hidden_state = self.encoder(reconstruct_seq, random_walk_matrix)
        outputs = self.decoder(encoder_hidden_state, random_walk_matrix)
        # Back to (batch, nodes, time).
        outputs = outputs.permute(1,2,0)
        # Restore self-loops (diagonal = 1) for the GCN classification pass.
        adj = torch.where(mask, torch.ones(
            mask.shape).to(device), adj)
        prediction = self.predictor(adj, node_feas)
        return outputs, prediction, adj
| 16,048 | 35.894253 | 119 | py |
FBNETGEN | FBNETGEN-main/model/model.py | from turtle import forward
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, MaxPool1d, Linear, GRU
import math
def sample_gumbel(shape, eps=1e-20):
    """Draw i.i.d. Gumbel(0, 1) noise of the given shape on the GPU."""
    uniform = torch.rand(shape).cuda()
    # Inverse-CDF transform of uniform noise; eps guards log(0).
    gumbel = -torch.log(-torch.log(uniform + eps) + eps)
    return torch.autograd.Variable(gumbel)
def gumbel_softmax_sample(logits, temperature, eps=1e-10):
    """Draw one differentiable (soft) sample from the Gumbel-Softmax."""
    noisy_logits = logits + sample_gumbel(logits.size(), eps=eps)
    return F.softmax(noisy_logits / temperature, dim=-1)
def gumbel_softmax(logits, temperature, hard=False, eps=1e-10):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.

    Args:
        logits: [batch_size, n_class] unnormalized log-probs
        temperature: non-negative scalar controlling sample sharpness
        hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
        [batch_size, n_class] sample. One-hot when hard=True, otherwise a
        probability distribution summing to 1 across classes.
    """
    soft_sample = gumbel_softmax_sample(logits, temperature=temperature, eps=eps)
    if not hard:
        return soft_sample
    # Straight-through estimator: forward pass uses the one-hot argmax,
    # backward pass flows through the soft sample.
    dims = logits.size()
    _, argmax_idx = soft_sample.data.max(-1)
    one_hot = torch.zeros(*dims).cuda()
    one_hot = one_hot.zero_().scatter_(-1, argmax_idx.view(dims[:-1] + (1,)), 1.0)
    return torch.autograd.Variable(one_hot - soft_sample.data) + soft_sample
class GruKRegion(nn.Module):
    """Per-region sequence encoder: a bidirectional GRU over fixed-size
    windows of each region's time series, followed by an MLP projection."""

    def __init__(self, kernel_size=8, layers=4, out_size=8, dropout=0.5):
        super().__init__()
        self.gru = GRU(kernel_size, kernel_size, layers,
                       bidirectional=True, batch_first=True)
        self.kernel_size = kernel_size
        self.linear = nn.Sequential(
            nn.Dropout(dropout),
            Linear(kernel_size*2, kernel_size),
            nn.LeakyReLU(negative_slope=0.2),
            Linear(kernel_size, out_size)
        )

    def forward(self, raw):
        batch, regions, _ = raw.shape
        # Fold regions into the batch axis and chop each series into
        # non-overlapping windows of length kernel_size.
        windows = raw.view((batch * regions, -1, self.kernel_size))
        out, _ = self.gru(windows)
        # Keep only the final time step of the bidirectional output.
        last_step = out[:, -1, :]
        embeddings = last_step.view((batch, regions, -1))
        return self.linear(embeddings)
class ConvKRegion(nn.Module):
    """Per-region sequence encoder: a 3-layer 1-D CNN with batch norm and a
    max-pool, followed by an MLP projection to ``out_size`` features."""

    def __init__(self, k=1, out_size=8, kernel_size=8, pool_size=16, time_series=512):
        super().__init__()
        self.conv1 = Conv1d(in_channels=k, out_channels=32,
                            kernel_size=kernel_size, stride=2)
        # Track the temporal length through every stage so the final linear
        # layer can be sized exactly.
        output_dim_1 = (time_series-kernel_size)//2+1
        self.conv2 = Conv1d(in_channels=32, out_channels=32,
                            kernel_size=8)
        output_dim_2 = output_dim_1 - 8 + 1
        self.conv3 = Conv1d(in_channels=32, out_channels=16,
                            kernel_size=8)
        output_dim_3 = output_dim_2 - 8 + 1
        self.max_pool1 = MaxPool1d(pool_size)
        output_dim_4 = output_dim_3 // pool_size * 16
        self.in0 = nn.InstanceNorm1d(time_series)
        self.in1 = nn.BatchNorm1d(32)
        self.in2 = nn.BatchNorm1d(32)
        self.in3 = nn.BatchNorm1d(16)
        self.linear = nn.Sequential(
            Linear(output_dim_4, 32),
            nn.LeakyReLU(negative_slope=0.2),
            Linear(32, out_size)
        )

    def forward(self, x):
        batch, regions, length = x.shape
        # Instance-normalise with time points as the channel axis, then
        # restore the original layout.
        x = self.in0(x.transpose(1, 2)).transpose(1, 2).contiguous()
        # Treat every region as an independent single-channel sequence.
        x = x.view((batch * regions, 1, length))
        x = self.in1(self.conv1(x))
        x = self.in2(self.conv2(x))
        x = self.in3(self.conv3(x))
        x = self.max_pool1(x)
        x = x.view((batch, regions, -1))
        return self.linear(x)
class SeqenceModel(nn.Module):
    """Baseline classifier: per-ROI sequence features -> flatten -> MLP head."""

    def __init__(self, model_config, roi_num=360, time_series=512):
        super().__init__()
        extractor = model_config['extractor_type']
        if extractor == 'cnn':
            self.extract = ConvKRegion(
                out_size=model_config['embedding_size'],
                kernel_size=model_config['window_size'],
                time_series=time_series, pool_size=4, )
        elif extractor == 'gru':
            self.extract = GruKRegion(
                out_size=model_config['embedding_size'],
                kernel_size=model_config['window_size'],
                layers=model_config['num_gru_layers'],
                dropout=model_config['dropout'])
        self.linear = nn.Sequential(
            Linear(model_config['embedding_size']*roi_num, 256),
            nn.Dropout(model_config['dropout']),
            nn.ReLU(),
            Linear(256, 32),
            nn.Dropout(model_config['dropout']),
            nn.ReLU(),
            Linear(32, 2)
        )

    def forward(self, x):
        features = self.extract(x)
        return self.linear(features.flatten(start_dim=1))
class Embed2GraphByProduct(nn.Module):
    """Build a dense graph from node embeddings via pairwise inner products."""

    def __init__(self, input_dim, roi_num=264):
        super().__init__()

    def forward(self, x):
        # products[b, i, j] = <x[b, i], x[b, j]>; the trailing singleton dim
        # plays the role of an edge-feature axis.
        products = torch.einsum('ijk,ipk->ijp', x, x)
        return products.unsqueeze(-1)
class Embed2GraphByLinear(nn.Module):
    """Build a dense graph from node embeddings with a learned edge scorer.

    Every ordered (sender, receiver) node pair is scored by a small MLP over
    the concatenated pair of embeddings.
    """

    def __init__(self, input_dim, roi_num=360):
        super().__init__()
        self.fc_out = nn.Linear(input_dim * 2, input_dim)
        self.fc_cat = nn.Linear(input_dim, 1)

        def encode_onehot(labels):
            # One-hot encode an index sequence.
            classes = set(labels)
            classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                            enumerate(classes)}
            labels_onehot = np.array(list(map(classes_dict.get, labels)),
                                     dtype=np.int32)
            return labels_onehot

        # rel_rec / rel_send gather the receiver / sender embedding for every
        # ordered pair of the fully connected graph (self-loops included).
        off_diag = np.ones([roi_num, roi_num])
        rel_rec = np.array(encode_onehot(
            np.where(off_diag)[0]), dtype=np.float32)
        rel_send = np.array(encode_onehot(
            np.where(off_diag)[1]), dtype=np.float32)
        self.rel_rec = torch.FloatTensor(rel_rec).cuda()
        self.rel_send = torch.FloatTensor(rel_send).cuda()

    def forward(self, x):
        batch_sz, region_num, _ = x.shape
        receivers = torch.matmul(self.rel_rec, x)
        senders = torch.matmul(self.rel_send, x)
        pair_feats = torch.relu(self.fc_out(torch.cat([senders, receivers], dim=2)))
        scores = torch.relu(self.fc_cat(pair_feats))
        return torch.reshape(scores, (batch_sz, region_num, region_num, -1))
class GNNPredictor(nn.Module):
    """Three-stage graph network classifier.

    Takes a dense (batch, roi, roi) graph `m` plus per-node features and
    produces (batch, 2) class scores.
    """

    def __init__(self, node_input_dim, roi_num=360):
        super().__init__()
        inner_dim = roi_num
        self.roi_num = roi_num
        # Stage 1: project raw node features to inner_dim.
        self.gcn = nn.Sequential(
            nn.Linear(node_input_dim, inner_dim),
            nn.LeakyReLU(negative_slope=0.2),
            Linear(inner_dim, inner_dim)
        )
        self.bn1 = torch.nn.BatchNorm1d(inner_dim)
        self.gcn1 = nn.Sequential(
            nn.Linear(inner_dim, inner_dim),
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.bn2 = torch.nn.BatchNorm1d(inner_dim)
        self.gcn2 = nn.Sequential(
            nn.Linear(inner_dim, 64),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Linear(64, 8),
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.bn3 = torch.nn.BatchNorm1d(inner_dim)
        # Read-out head over all node embeddings concatenated (8 per node).
        self.fcn = nn.Sequential(
            nn.Linear(8*roi_num, 256),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Linear(256, 32),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Linear(32, 2)
        )

    def forward(self, m, node_feature):
        bz = m.shape[0]
        # NOTE(review): 'ijk,ijp->ijp' sums m over its last axis and scales
        # each node's feature row by that summed weight; a conventional graph
        # convolution would be 'ijk,ikp->ijp'. Confirm this contraction is
        # intentional before changing it.
        x = torch.einsum('ijk,ijp->ijp', m, node_feature)
        x = self.gcn(x)
        # BatchNorm1d over flattened (batch*roi) rows, then restore shape.
        x = x.reshape((bz*self.roi_num, -1))
        x = self.bn1(x)
        x = x.reshape((bz, self.roi_num, -1))
        x = torch.einsum('ijk,ijp->ijp', m, x)
        x = self.gcn1(x)
        x = x.reshape((bz*self.roi_num, -1))
        x = self.bn2(x)
        x = x.reshape((bz, self.roi_num, -1))
        x = torch.einsum('ijk,ijp->ijp', m, x)
        x = self.gcn2(x)
        # Here BatchNorm1d sees (bz, roi_num, 8), i.e. the ROI axis is
        # treated as the channel axis.
        x = self.bn3(x)
        x = x.view(bz,-1)
        return self.fcn(x)
class FBNETGEN(nn.Module):
    """End-to-end pipeline: time series -> embeddings -> learned graph -> GNN
    classifier. Returns (predictions, adjacency, edge-variance regulariser)."""

    def __init__(self, model_config, roi_num=360, node_feature_dim=360, time_series=512):
        super().__init__()
        self.graph_generation = model_config['graph_generation']
        extractor = model_config['extractor_type']
        if extractor == 'cnn':
            self.extract = ConvKRegion(
                out_size=model_config['embedding_size'],
                kernel_size=model_config['window_size'],
                time_series=time_series)
        elif extractor == 'gru':
            self.extract = GruKRegion(
                out_size=model_config['embedding_size'],
                kernel_size=model_config['window_size'],
                layers=model_config['num_gru_layers'])
        if self.graph_generation == "linear":
            self.emb2graph = Embed2GraphByLinear(
                model_config['embedding_size'], roi_num=roi_num)
        elif self.graph_generation == "product":
            self.emb2graph = Embed2GraphByProduct(
                model_config['embedding_size'], roi_num=roi_num)
        self.predictor = GNNPredictor(node_feature_dim, roi_num=roi_num)

    def forward(self, t, nodes):
        embeddings = F.softmax(self.extract(t), dim=-1)
        # Drop the singleton edge-feature axis produced by the generator.
        adjacency = self.emb2graph(embeddings)[:, :, :, 0]
        bz = adjacency.shape[0]
        # Mean per-sample variance of the edge weights (regularisation term).
        edge_variance = torch.mean(torch.var(adjacency.reshape((bz, -1)), dim=1))
        return self.predictor(adjacency, nodes), adjacency, edge_variance
class E2EBlock(torch.nn.Module):
    '''Edge-to-edge convolution block (BrainNetCNN).

    Applies a row filter and a column filter to the d x d connectivity grid
    and broadcasts each result back to the full grid before summing.
    '''

    def __init__(self, in_planes, planes, roi_num, bias=True):
        super().__init__()
        self.d = roi_num
        self.cnn1 = torch.nn.Conv2d(in_planes, planes, (1, self.d), bias=bias)
        self.cnn2 = torch.nn.Conv2d(in_planes, planes, (self.d, 1), bias=bias)

    def forward(self, x):
        row_part = self.cnn1(x)   # (B, planes, d, 1)
        col_part = self.cnn2(x)   # (B, planes, 1, d)
        tiled_rows = torch.cat([row_part] * self.d, 3)
        tiled_cols = torch.cat([col_part] * self.d, 2)
        return tiled_rows + tiled_cols
class BrainNetCNN(torch.nn.Module):
    """BrainNetCNN classifier over connectivity matrices.

    Pipeline: edge-to-edge blocks -> edge-to-node conv -> node-to-graph conv
    -> 3-layer dense head. Input: (B, d, d); output: (B, 2) class scores.
    """

    def __init__(self, roi_num):
        super().__init__()
        self.in_planes = 1
        self.d = roi_num
        self.e2econv1 = E2EBlock(1, 32, roi_num, bias=True)
        self.e2econv2 = E2EBlock(32, 64, roi_num, bias=True)
        self.E2N = torch.nn.Conv2d(64, 1, (1, self.d))
        self.N2G = torch.nn.Conv2d(1, 256, (self.d, 1))
        self.dense1 = torch.nn.Linear(256, 128)
        self.dense2 = torch.nn.Linear(128, 30)
        self.dense3 = torch.nn.Linear(30, 2)

    def forward(self, x):
        x = x.unsqueeze(dim=1)  # add the channel axis: (B, 1, d, d)
        out = F.leaky_relu(self.e2econv1(x), negative_slope=0.33)
        out = F.leaky_relu(self.e2econv2(out), negative_slope=0.33)
        out = F.leaky_relu(self.E2N(out), negative_slope=0.33)
        # Bug fix: F.dropout defaults to training=True, so the original
        # applied dropout even in eval mode; tie it to self.training so
        # model.eval() disables it (training behavior is unchanged).
        out = F.dropout(F.leaky_relu(
            self.N2G(out), negative_slope=0.33), p=0.5, training=self.training)
        out = out.view(out.size(0), -1)
        out = F.dropout(F.leaky_relu(
            self.dense1(out), negative_slope=0.33), p=0.5, training=self.training)
        out = F.dropout(F.leaky_relu(
            self.dense2(out), negative_slope=0.33), p=0.5, training=self.training)
        out = F.leaky_relu(self.dense3(out), negative_slope=0.33)
        return out
class FCNet(nn.Module):
    """Siamese 1-D CNN over a pair of time series.

    Each of the two input series is encoded by a shared convolutional stack,
    projected to a 32-d vector, and the concatenated pair is classified into
    two classes.
    """

    def __init__(self, node_size, seq_len, kernel_size=3):
        super().__init__()
        # Upper-triangle indices of the node x node grid (kept for callers).
        self.ind1, self.ind2 = torch.triu_indices(node_size, node_size, offset=1)
        # Track the temporal length through every conv/pool stage so the
        # final linear layer can be sized exactly.
        seq_len -= kernel_size//2*2
        channel1 = 32
        self.block1 = nn.Sequential(
            Conv1d(in_channels=1, out_channels=channel1,
                   kernel_size=kernel_size),
            nn.BatchNorm1d(channel1),
            nn.LeakyReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2)
        )
        seq_len //= 2
        seq_len -= kernel_size//2*2
        channel2 = 64
        self.block2 = nn.Sequential(
            Conv1d(in_channels=channel1, out_channels=channel2,
                   kernel_size=kernel_size),
            nn.BatchNorm1d(channel2),
            nn.LeakyReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2)
        )
        seq_len //= 2
        seq_len -= kernel_size//2*2
        channel3 = 96
        self.block3 = nn.Sequential(
            Conv1d(in_channels=channel2, out_channels=channel3,
                   kernel_size=kernel_size),
            nn.BatchNorm1d(channel3),
            nn.LeakyReLU()
        )
        channel4 = 64
        self.block4 = nn.Sequential(
            Conv1d(in_channels=channel3, out_channels=channel4,
                   kernel_size=kernel_size),
            Conv1d(in_channels=channel4, out_channels=channel4,
                   kernel_size=kernel_size),
            nn.MaxPool1d(kernel_size=2, stride=2)
        )
        seq_len -= kernel_size//2*2
        seq_len -= kernel_size//2*2
        seq_len //= 2
        self.fc = nn.Linear(in_features=seq_len*channel4, out_features=32)
        self.diff_mode = nn.Sequential(
            nn.Linear(in_features=32*2, out_features=32),
            nn.Linear(in_features=32, out_features=32),
            nn.Linear(in_features=32, out_features=2)
        )

    def forward(self, x):
        pairs, _, steps = x.shape
        # Run both series of every pair through the shared encoder.
        series = x.reshape((pairs*2, 1, steps))
        series = self.block1(series)
        series = self.block2(series)
        series = self.block3(series)
        series = self.block4(series)
        encoded = self.fc(series.reshape((pairs, 2, -1)))
        return self.diff_mode(encoded.reshape((pairs, -1)))
| 13,552 | 29.050998 | 97 | py |
FBNETGEN | FBNETGEN-main/model/__init__.py | from .GSL import BrainGSLModel, TSConstruction
from .model import FBNETGEN, GNNPredictor, SeqenceModel, BrainNetCNN | 115 | 57 | 68 | py |
FBNETGEN | FBNETGEN-main/model/cell.py | import numpy as np
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
    """Lazily-created, shape-keyed parameter store for an RNN module.

    Weights and biases are allocated on first request for a given shape or
    length and registered on the owning module so optimizers can find them.
    """

    def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
        self._rnn_network = rnn_network
        self._params_dict = {}
        self._biases_dict = {}
        self._type = layer_type

    def get_weights(self, shape):
        """Return (creating on first use) a Xavier-initialised weight of `shape`."""
        if shape not in self._params_dict:
            weight = torch.nn.Parameter(torch.empty(*shape, device=device))
            torch.nn.init.xavier_normal_(weight)
            self._params_dict[shape] = weight
            name = '{}_weight_{}'.format(self._type, str(shape))
            self._rnn_network.register_parameter(name, weight)
        return self._params_dict[shape]

    def get_biases(self, length, bias_start=0.0):
        """Return (creating on first use) a constant-initialised bias of `length`."""
        if length not in self._biases_dict:
            bias = torch.nn.Parameter(torch.empty(length, device=device))
            torch.nn.init.constant_(bias, bias_start)
            self._biases_dict[length] = bias
            name = '{}_biases_{}'.format(self._type, str(length))
            self._rnn_network.register_parameter(name, bias)
        return self._biases_dict[length]
class DCGRUCell(torch.nn.Module):
    def __init__(self, num_units, max_diffusion_step, num_nodes, nonlinearity='tanh',
                 filter_type="laplacian", use_gc_for_ru=True):
        """Diffusion-convolutional GRU cell.

        :param num_units: hidden state size per node.
        :param adj_mx:
        :param max_diffusion_step: K, the number of diffusion steps in _gconv.
        :param num_nodes: number of graph nodes.
        :param nonlinearity: 'tanh' (default) or anything else for relu.
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super().__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        # support other nonlinearities up here?
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        # Lazily-created weights for the fully-connected and graph-conv paths.
        self._fc_params = LayerParams(self, 'fc')
        self._gconv_params = LayerParams(self, 'gconv')

    @staticmethod
    def _build_sparse_matrix(L):
        # Convert a scipy sparse matrix to a torch sparse tensor on `device`.
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
        indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
        L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
        return L

    def forward(self, inputs, hx, adj):
        """Gated recurrent unit (GRU) with Graph Convolution.
        :param inputs: (B, num_nodes * input_dim)
        :param hx: (B, num_nodes * rnn_units)
        :param adj: (B, num_nodes, num_nodes) adjacency; transposed before use.
        :return
        - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
        """
        # adj_mx = self._calculate_random_walk_matrix(adj)
        adj_mx = adj.permute(0, 2, 1)
        output_size = 2 * self._num_units
        # NOTE(review): when _use_gc_for_ru is False this call does not match
        # _fc's signature (inputs, state, output_size, bias_start) — adj_mx
        # would be passed as `state` and bias_start given twice. Confirm the
        # _fc path is unused or fix before enabling it.
        if self._use_gc_for_ru:
            fn = self._gconv
        else:
            fn = self._fc
        # Reset (r) and update (u) gates, computed jointly then split.
        value = torch.sigmoid(
            fn(inputs, adj_mx, hx, output_size, bias_start=1.0))
        value = torch.reshape(value, (-1, self._num_nodes, output_size))
        r, u = torch.split(
            tensor=value, split_size_or_sections=self._num_units, dim=-1)
        r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
        u = torch.reshape(u, (-1, self._num_nodes * self._num_units))
        # Candidate state from the reset-gated previous state.
        c = self._gconv(inputs, adj_mx, r * hx, self._num_units)
        if self._activation is not None:
            c = self._activation(c)
        # Standard GRU interpolation between previous and candidate state.
        new_state = u * hx + (1.0 - u) * c
        return new_state

    @staticmethod
    def _concat(x, x_):
        # Stack x_ as a new leading "diffusion order" slice onto x.
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)

    def _fc(self, inputs, state, output_size, bias_start=0.0):
        # Plain fully-connected gate computation over [inputs, state].
        # NOTE(review): applies sigmoid here while forward() sigmoids the
        # result again — confirm the double sigmoid is intended.
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
        state = torch.reshape(state, (batch_size * self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=-1)
        input_size = inputs_and_state.shape[-1]
        weights = self._fc_params.get_weights((input_size, output_size))
        value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
        biases = self._fc_params.get_biases(output_size, bias_start)
        value += biases
        return value

    def _gconv(self, inputs, adj_mx, state, output_size, bias_start=0.0):
        # Diffusion graph convolution over [inputs, state] with a
        # Chebyshev-style recurrence x2 = 2*A*x1 - x0 up to K steps.
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
        state = torch.reshape(state, (batch_size, self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=2)
        input_size = inputs_and_state.size(2)
        # batch_size, num_nodes, input_dim
        x = inputs_and_state
        x0 = x
        # x0 = torch.reshape(
        #     x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)
        if self._max_diffusion_step == 0:
            pass
        else:
            x1 = torch.bmm(adj_mx, x0)
            x = self._concat(x, x1)
            for k in range(2, self._max_diffusion_step + 1):
                x2 = 2 * torch.bmm(adj_mx, x1) - x0
                x = self._concat(x, x2)
                x1, x0 = x2, x1
        num_matrices = self._max_diffusion_step + 1  # Adds for x itself.
        # order, batch_size, num_nodes, input_dim
        x = x.permute(1, 2, 3, 0)  # (batch_size, num_nodes, input_size, order)
        x = torch.reshape(
            x, shape=[batch_size * self._num_nodes, input_size * num_matrices])
        weights = self._gconv_params.get_weights(
            (input_size * num_matrices, output_size))
        # (batch_size * self._num_nodes, output_size)
        x = torch.matmul(x, weights)
        biases = self._gconv_params.get_biases(output_size, bias_start)
        x += biases
        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return torch.reshape(x, [batch_size, self._num_nodes * output_size])
| 6,299 | 38.873418 | 105 | py |
dynet | dynet-master/setup.py | import distutils.sysconfig
import logging as log
import platform
import zipfile
import sys
from distutils.command.build import build as _build
from distutils.command.build_py import build_py as _build_py
from distutils.command.install_data import install_data as _install_data
from distutils.errors import DistutilsSetupError
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_lib
from multiprocessing import cpu_count
from subprocess import Popen
import os
import re
from Cython.Distutils import build_ext as _build_ext
from setuptools import setup
from setuptools.extension import Extension
from shutil import rmtree, copytree, copy
# urlretrieve lives in different places in Python 2 (urllib.urlretrieve)
# and Python 3 (urllib.request.urlretrieve); alias it once here.
import urllib
if hasattr(urllib, "urlretrieve"):
    urlretrieve = urllib.urlretrieve
else:
    import urllib.request
    urlretrieve = urllib.request.urlretrieve
def run_process(cmds):
    """Run `cmds` as a subprocess, block until it exits, return its exit code."""
    proc = Popen(cmds)
    proc.wait()
    return proc.returncode
def append_cmake_list(l, var):
    """Extend list `l` with the entries of semicolon-separated `var`.

    A falsy `var` (None or empty string) is a no-op.
    """
    if not var:
        return
    l.extend(var.split(";"))
def append_cmake_lib_list(l, var):
    """Like append_cmake_list, but strips library prefixes/suffixes from
    every entry via strip_lib before appending."""
    if var:
        for entry in var.split(";"):
            l.append(strip_lib(entry))
# Strip library prefixes and suffixes to prevent linker confusion
def strip_lib(filename):
    """Strip library prefixes and suffixes (``lib*.so/.a/.dylib`` and
    ``*.lib``) so the bare name can be handed to the linker."""
    unix = re.match(r"^(?:lib)?(.*)\.(?:so|a|dylib)$", filename)
    if unix:
        filename = unix.group(1)
    return re.sub(r"^(.*)\.lib$", r"\1", filename)
def get_env(build_dir):
    """Collect configuration variables, later sources overriding earlier ones:

    1. the process environment,
    2. the CMakeCache.txt in `build_dir` (if present),
    3. leading KEY=VALUE arguments on the command line, which are consumed
       (removed from sys.argv).
    """
    # Get environmental variables first
    ENV = dict(os.environ)
    # Get values listed in the CMakeCache.txt file (if existant)
    var_regex = r"^([^:]+):([^=]+)=(.*)$"
    cache_path = os.path.join(build_dir, "CMakeCache.txt")
    try:
        with open(cache_path, "r") as cache_file:
            for line in cache_file:
                m = re.match(var_regex, line.strip())
                if m:
                    ENV[m.group(1)] = m.group(3)
    except (OSError, IOError):  # no cache (yet) -- that's fine
        pass
    # Consume leading KEY=VALUE command line arguments.
    consumed = 0
    for arg in sys.argv[1:]:
        try:
            key, value = arg.split("=", 1)
        except ValueError:
            break
        ENV[key] = value
        consumed += 1
    # Bug fix: the original `del sys.argv[1:i+1]` left the last consumed
    # argument in sys.argv whenever *every* argument was KEY=VALUE.
    del sys.argv[1:consumed + 1]
    return ENV
log.basicConfig(stream=sys.stdout, level=log.INFO)
# Find the current directory
try:
    this_file = __file__
except NameError:
    this_file = sys.argv[0]
ORIG_DIR = os.getcwd()
SCRIPT_DIR = os.path.dirname(os.path.abspath(this_file))
# When invoked from the CMake build's python/ subdirectory, the build tree
# is its parent; otherwise assume an in-place layout with python/ below.
if ORIG_DIR.rstrip('/').endswith('python'):
    BUILD_DIR = ORIG_DIR.rstrip('/').rstrip('python')
    PYTHON_DIR = ORIG_DIR
else:
    BUILD_DIR = ORIG_DIR
    PYTHON_DIR = ORIG_DIR + '/python'
ENV = get_env(BUILD_DIR)
# Find the paths to the tools (env/CMakeCache overrides win over PATH lookup).
BUILT_EXTENSIONS = False
CMAKE_PATH = ENV.get("CMAKE", find_executable("cmake"))
MAKE_PATH = ENV.get("MAKE", find_executable("make"))
MAKE_FLAGS = ENV.get("MAKE_FLAGS", "-j %d" % cpu_count()).split()
CC_PATH = ENV.get("CC", find_executable("gcc"))
CXX_PATH = ENV.get("CXX", find_executable("g++"))
INSTALL_PREFIX = os.path.join(get_python_lib(), os.pardir, os.pardir, os.pardir)
PYTHON = sys.executable
# Try to find Eigen
EIGEN3_INCLUDE_DIR = ENV.get("EIGEN3_INCLUDE_DIR") # directory where eigen is saved
# The cmake directory and Python directory are different in manual install, so
# will break if relative path is specified. Try moving up if path is specified
# but not found
if (EIGEN3_INCLUDE_DIR is not None and
        not os.path.isdir(EIGEN3_INCLUDE_DIR) and
        os.path.isdir(os.path.join(os.pardir, EIGEN3_INCLUDE_DIR))):
    EIGEN3_INCLUDE_DIR = os.path.join(os.pardir, EIGEN3_INCLUDE_DIR)
EIGEN3_DOWNLOAD_URL = ENV.get("EIGEN3_DOWNLOAD_URL", "https://github.com/clab/dynet/releases/download/2.1/eigen-b2e267dc99d4.zip")
# Remove the "-Wstrict-prototypes" compiler option, which isn't valid for C++.
cfg_vars = distutils.sysconfig.get_config_vars()
CFLAGS = cfg_vars.get("CFLAGS")
if CFLAGS is not None:
    cfg_vars["CFLAGS"] = CFLAGS.replace("-Wstrict-prototypes", "")
# For Cython extensions
LIBRARIES = ["dynet"]
LIBRARY_DIRS = ["."]
COMPILER_ARGS = []
EXTRA_LINK_ARGS = []
RUNTIME_LIB_DIRS = []
INCLUDE_DIRS = []
DATA_FILES=[]
# Add all environment variables from CMake for Cython extensions
append_cmake_lib_list(LIBRARIES, ENV.get("CUDA_CUBLAS_FILES"))
append_cmake_list(LIBRARY_DIRS, ENV.get("CUDA_CUBLAS_DIRS"))
CMAKE_INSTALL_PREFIX = ENV.get("CMAKE_INSTALL_PREFIX", INSTALL_PREFIX)
LIBS_INSTALL_DIR = CMAKE_INSTALL_PREFIX + "/lib/"
PROJECT_SOURCE_DIR = ENV.get("PROJECT_SOURCE_DIR", SCRIPT_DIR)  # location of the main dynet directory
PROJECT_BINARY_DIR = ENV.get("PROJECT_BINARY_DIR", BUILD_DIR)  # path where dynet is built
DYNET_LIB_DIR = PROJECT_BINARY_DIR + "/dynet/"
# Platform-specific compiler/linker setup.
if ENV.get("MSVC") == "1":
    COMPILER_ARGS[:] = ["-DNOMINMAX", "/EHsc"]
    DYNET_LIB_DIR += "/Release/"
    # For MSVC, we compile dynet as a static lib, so we need to also link in the
    # other libraries it depends on:
    append_cmake_lib_list(LIBRARIES, ENV.get("LIBS"))
    append_cmake_list(LIBRARY_DIRS, ENV.get("MKL_LINK_DIRS"))  # Add the MKL dirs, if MKL is being used
    append_cmake_lib_list(LIBRARIES, ENV.get("CUDA_RT_FILES"))
    append_cmake_list(LIBRARY_DIRS, ENV.get("CUDA_RT_DIRS"))
    DATA_FILES += [DYNET_LIB_DIR + lib + ".lib" for lib in LIBRARIES]
else:
    COMPILER_ARGS[:] = ["-std=c++11", "-Wno-unused-function"]
    RUNTIME_LIB_DIRS.extend([DYNET_LIB_DIR, LIBS_INSTALL_DIR])
    # in some OSX systems, the following extra flags are needed:
    if platform.system() == "Darwin":
        COMPILER_ARGS.extend(["-stdlib=libc++", "-mmacosx-version-min=10.7"])
        EXTRA_LINK_ARGS.append("-Wl,-rpath," + LIBS_INSTALL_DIR)
        if "--skip-build" not in sys.argv:  # Include libdynet.dylib unless doing manual install
            DATA_FILES += [os.path.join(LIBS_INSTALL_DIR, "lib%s.dylib" % lib) for lib in LIBRARIES]
    else:
        EXTRA_LINK_ARGS.append("-Wl,-rpath=%r" % LIBS_INSTALL_DIR + ",--no-as-needed")
LIBRARY_DIRS.insert(0, DYNET_LIB_DIR)
INCLUDE_DIRS[:] = filter(None, [PROJECT_SOURCE_DIR, EIGEN3_INCLUDE_DIR])
# The single Cython extension to build.
TARGET = [Extension(
    "_dynet",  # name of extension
    [PYTHON_DIR + "/_dynet.pyx"],  # filename of our Pyrex/Cython source
    language="c++",  # this causes Pyrex/Cython to create C++ source
    include_dirs=INCLUDE_DIRS,
    libraries=LIBRARIES,
    library_dirs=LIBRARY_DIRS,
    extra_link_args=EXTRA_LINK_ARGS,
    extra_compile_args=COMPILER_ARGS,
    runtime_library_dirs=RUNTIME_LIB_DIRS,
)]
class build(_build):
    """Custom build command: configures/compiles the DyNet C++ library via
    CMake and make (downloading Eigen if necessary) before the normal
    distutils build steps run."""

    user_options = [
        ("build-dir=", None, "New or existing DyNet build directory."),
        ("skip-build", None, "Assume DyNet C++ library is already built."),
    ]

    def __init__(self, *args, **kwargs):
        self.build_dir = None
        self.skip_build = False
        _build.__init__(self, *args, **kwargs)

    def initialize_options(self):
        # Default build dir encodes interpreter version, py2 unicode width,
        # and architecture, e.g. build/py3.6-64bit.
        py_version = "%s.%s" % (sys.version_info[0], sys.version_info[1])
        unicode_suffix = "u" if sys.version_info[0] == 2 and sys.maxunicode > 65536 else ""
        build_name = "py%s%s-%s" % (py_version, unicode_suffix, platform.architecture()[0])
        self.build_dir = os.path.join(SCRIPT_DIR, "build", build_name)
        _build.initialize_options(self)

    def run(self):
        global BUILD_DIR, BUILT_EXTENSIONS, EIGEN3_INCLUDE_DIR
        BUILD_DIR = os.path.abspath(self.build_dir)
        if EIGEN3_INCLUDE_DIR is None:
            EIGEN3_INCLUDE_DIR = os.path.join(BUILD_DIR, "eigen")
        EIGEN3_INCLUDE_DIR = os.path.abspath(EIGEN3_INCLUDE_DIR)
        log.info("CMAKE_PATH=%r" % CMAKE_PATH)
        log.info("MAKE_PATH=%r" % MAKE_PATH)
        log.info("MAKE_FLAGS=%r" % " ".join(MAKE_FLAGS))
        log.info("EIGEN3_INCLUDE_DIR=%r" % EIGEN3_INCLUDE_DIR)
        log.info("EIGEN3_DOWNLOAD_URL=%r" % EIGEN3_DOWNLOAD_URL)
        log.info("CC_PATH=%r" % CC_PATH)
        log.info("CXX_PATH=%r" % CXX_PATH)
        log.info("SCRIPT_DIR=%r" % SCRIPT_DIR)
        log.info("BUILD_DIR=%r" % BUILD_DIR)
        log.info("INSTALL_PREFIX=%r" % INSTALL_PREFIX)
        log.info("PYTHON=%r" % PYTHON)
        if CMAKE_PATH is not None:
            run_process([CMAKE_PATH, "--version"])
        if CXX_PATH is not None:
            run_process([CXX_PATH, "--version"])
        # This will generally be called by the pip install
        if not self.skip_build:
            if CMAKE_PATH is None:
                raise DistutilsSetupError("`cmake` not found, and `CMAKE` is not set.")
            if MAKE_PATH is None:
                raise DistutilsSetupError("`make` not found, and `MAKE` is not set.")
            if CC_PATH is None:
                raise DistutilsSetupError("`gcc` not found, and `CC` is not set.")
            if CXX_PATH is None:
                raise DistutilsSetupError("`g++` not found, and `CXX` is not set.")
            # Prepare folders
            if not os.path.isdir(BUILD_DIR):
                log.info("Creating build directory " + BUILD_DIR)
                os.makedirs(BUILD_DIR)
            os.chdir(BUILD_DIR)
            if os.path.isdir(EIGEN3_INCLUDE_DIR):
                log.info("Found eigen in " + EIGEN3_INCLUDE_DIR)
            else:
                # Download and unpack Eigen into the build tree.
                try:
                    # Can use BZ2 or zip, right now using zip
                    # log.info("Fetching Eigen...")
                    # urlretrieve(EIGEN3_DOWNLOAD_URL, "eigen.tar.bz2")
                    # log.info("Unpacking Eigen...")
                    # tfile = tarfile.open("eigen.tar.bz2", 'r')
                    # tfile.extractall('eigen')
                    log.info("Fetching Eigen...")
                    urlretrieve(EIGEN3_DOWNLOAD_URL, "eigen.zip")
                except Exception as e:
                    raise DistutilsSetupError("Could not download Eigen from %r: %s" % (EIGEN3_DOWNLOAD_URL, e))
                try:
                    log.info("Unpacking Eigen...")
                    os.mkdir(EIGEN3_INCLUDE_DIR)
                    with zipfile.ZipFile("eigen.zip") as zfile:
                        zfile.extractall(EIGEN3_INCLUDE_DIR)
                except Exception as e:
                    raise DistutilsSetupError("Could not extract Eigen to %r: %s" % (EIGEN3_INCLUDE_DIR, e))
            os.environ["CXX"] = CXX_PATH
            os.environ["CC"] = CC_PATH
            # Build module
            # NOTE(review): %r wraps each path in quotes, so cmake receives a
            # quoted value in its -D arguments; confirm this is intended
            # (e.g. for paths containing spaces) before changing.
            cmake_cmd = [
                CMAKE_PATH,
                SCRIPT_DIR,
                "-DCMAKE_INSTALL_PREFIX=%r" % INSTALL_PREFIX,
                "-DEIGEN3_INCLUDE_DIR=%r" % EIGEN3_INCLUDE_DIR,
                "-DPYTHON=%r" % PYTHON,
            ]
            for env_var in ("BACKEND", "CUDNN_ROOT", "CUDA_TOOLKIT_ROOT_DIR"):
                value = ENV.get(env_var)
                if value is not None:
                    cmake_cmd.append("-D" + env_var + "=%r" % value)
            log.info("Configuring...")
            if run_process(cmake_cmd) != 0:
                raise DistutilsSetupError(" ".join(cmake_cmd))
            make_cmd = [MAKE_PATH] + MAKE_FLAGS
            log.info("Compiling...")
            if run_process(make_cmd) != 0:
                raise DistutilsSetupError(" ".join(make_cmd))
            make_cmd = [MAKE_PATH, "install"]
            log.info("Installing...")
            if run_process(make_cmd) != 0:
                raise DistutilsSetupError(" ".join(make_cmd))
            # On macOS, rewrite each installed library's install_name so the
            # extension can locate it relative to its own load path.
            if platform.system() == "Darwin":  # macOS
                for filename in DATA_FILES:
                    new_install_name = "@loader_path/" + os.path.basename(filename)
                    install_name_tool_cmd = ["install_name_tool", "-id", new_install_name, filename]
                    log.info("fixing install_name for %s to %r" % (filename, new_install_name))
                    if run_process(install_name_tool_cmd) != 0:
                        raise DistutilsSetupError(" ".join(install_name_tool_cmd))
        # This will generally be called by the manual install
        elif not os.path.isdir(EIGEN3_INCLUDE_DIR):
            raise RuntimeError("Could not find Eigen in EIGEN3_INCLUDE_DIR={}. If doing manual install, please set the EIGEN3_INCLUDE_DIR variable with the absolute path to Eigen manually. If doing install via pip, please file an issue on github.com/clab/dynet".format(EIGEN3_INCLUDE_DIR))
        BUILT_EXTENSIONS = True  # because make calls build_ext
        _build.run(self)
class build_py(_build_py):
    """Run the standard build_py from inside the CMake build's python/ dir."""

    def run(self):
        python_dir = os.path.join(BUILD_DIR, "python")
        os.chdir(python_dir)
        log.info("Building Python files...")
        _build_py.run(self)
class install_data(_install_data):
    """Redirect data-file installation depending on the install target:
    wheel builds keep the configured path; macOS non-wheel installs place the
    files next to site-packages; other platforms install no data files."""

    def run(self):
        self.data_files = [(p, f) if self.is_wheel(p) else
                           (get_python_lib(), f) if platform.system() == "Darwin" else
                           (p, []) for p, f in self.data_files]
        _install_data.run(self)

    def is_wheel(self, path):
        # A wheel build installs underneath a directory literally named "wheel".
        return os.path.basename(os.path.abspath(os.path.join(self.install_dir, path))) == "wheel"
class build_ext(_build_ext):
    """Build the Cython extension, pointing it at the artifacts produced by
    the custom `build` step when the C++ library was compiled here."""

    def run(self):
        if BUILT_EXTENSIONS:
            # `build` ran first: pick up the freshly unpacked Eigen headers
            # and the libdynet from the CMake build tree.
            INCLUDE_DIRS.append(EIGEN3_INCLUDE_DIR)
            LIBRARY_DIRS.append(BUILD_DIR + "/dynet/")
        log.info("Building Cython extensions...")
        log.info("INCLUDE_DIRS=%r" % " ".join(INCLUDE_DIRS))
        log.info("LIBRARIES=%r" % " ".join(LIBRARIES))
        log.info("LIBRARY_DIRS=%r" % " ".join(LIBRARY_DIRS))
        log.info("COMPILER_ARGS=%r" % " ".join(COMPILER_ARGS))
        log.info("EXTRA_LINK_ARGS=%r" % " ".join(EXTRA_LINK_ARGS))
        log.info("RUNTIME_LIB_DIRS=%r" % " ".join(RUNTIME_LIB_DIRS))
        _build_ext.run(self)
        if os.path.abspath(".") != SCRIPT_DIR:
            # Out-of-source build: mirror the generated build/ tree back into
            # the source directory so packaging can find the artifacts.
            log.info("Copying built extensions...")
            for d in os.listdir("build"):
                target_dir = os.path.join(SCRIPT_DIR, "build", d)
                rmtree(target_dir, ignore_errors=True)
                try:
                    copytree(os.path.join("build", d), target_dir)
                except OSError as e:
                    log.info("Cannot copy %s %s" % (os.path.join("build",d), e))
# Best-effort read of the long description for PyPI.
try:
    with open(os.path.join(SCRIPT_DIR, 'README.md'), encoding='utf-8') as f:
        long_description = f.read()
except:
    # NOTE(review): the bare except tolerates a missing README (and, on
    # Python 2, open() rejecting the `encoding` keyword), but it also hides
    # unexpected errors; consider narrowing to (OSError, TypeError).
    long_description = ""
setup(
    name="dyNET",
    # version="0.0.0",
    install_requires=["cython", "numpy"],
    description="The Dynamic Neural Network Toolkit",
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: MacOS X",
        "Environment :: Win32 (MS Windows)",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: C++",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    author="Graham Neubig",
    author_email="dynet-users@googlegroups.com",
    url="https://github.com/clab/dynet",
    download_url="https://github.com/clab/dynet/releases",
    license="Apache 2.0",
    # Wire in the custom build pipeline defined above.
    cmdclass={"build": build, "build_py": build_py, "install_data": install_data, "build_ext": build_ext},
    ext_modules=TARGET,
    py_modules=["dynet", "dynet_viz", "dynet_config"],
    data_files=[(os.path.join("..", ".."), DATA_FILES)],
)
| 16,189 | 39.173697 | 289 | py |
dynet | dynet-master/examples/variational-autoencoder/basic-image-recon/utils.py | import os, struct
import numpy as np
import math
# adapted from https://github.com/clab/dynet/blob/master/examples/mnist/mnist-autobatch.py
def load_mnist(dataset, path):
    """Load raw MNIST images for `dataset` ("training" or "testing").

    Expects the uncompressed idx files in `path`:

    wget -O - http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz | gunzip > train-images-idx3-ubyte
    wget -O - http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz | gunzip > train-labels-idx1-ubyte
    wget -O - http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz | gunzip > t10k-images-idx3-ubyte
    wget -O - http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz | gunzip > t10k-labels-idx1-ubyte

    Returns:
        list of float images scaled to [0, 1], each shaped (1, 28, 28).

    Raises:
        ValueError: if `dataset` is neither "training" nor "testing".
    """
    # Bug fix: the original compared strings with `is`, which relies on
    # CPython string interning and is not guaranteed to hold for equal
    # strings; use == instead.
    if dataset == "training":
        fname_img = os.path.join(path, "train-images-idx3-ubyte")
        fname_lbl = os.path.join(path, "train-labels-idx1-ubyte")
    elif dataset == "testing":
        fname_img = os.path.join(path, "t10k-images-idx3-ubyte")
        fname_lbl = os.path.join(path, "t10k-labels-idx1-ubyte")
    else:
        raise ValueError("dataset must be 'testing' or 'training'")
    # Load everything in numpy arrays
    with open(fname_lbl, "rb") as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        labels = np.fromfile(flbl, dtype=np.int8)
    with open(fname_img, "rb") as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        images = np.multiply(
            np.fromfile(fimg, dtype=np.uint8).reshape(len(labels), rows*cols),
            1.0 / 255.0)
    # (The original's unused per-index iterator helper has been removed.)
    size_reset = lambda x: x.reshape(1, 28, 28)  # (784,) -> (1, 28, 28)
    return list(map(size_reset, images))
def make_grid(tensor, nrow=8, padding=2, pad_value=0):
    """Arrange a batch of images into a single grid image, via numpy.

    Args:
        tensor (ndarray or list): 4D mini-batch of shape (B x C x H x W),
            or a list of equally-sized images.
        nrow (int, optional): images per grid row; the grid ends up
            (ceil(B / nrow) x nrow). Default is 8.
        padding (int, optional): amount of padding. Default is 2.
        pad_value (float, optional): value for the padded pixels.
    """
    is_ndarray = isinstance(tensor, np.ndarray)
    is_image_list = (isinstance(tensor, list)
                     and all(isinstance(t, np.ndarray) for t in tensor))
    if not (is_ndarray or is_image_list):
        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))

    # Normalise every accepted input form to a (B, 3, H, W) mini-batch.
    if isinstance(tensor, list):
        tensor = np.stack(tensor, 0)
    if tensor.ndim == 2:  # single image H x W
        tensor = tensor.reshape((1,) + tensor.shape)
    if tensor.ndim == 3:
        if tensor.shape[0] == 1:  # single-channel single image -> 3 channels
            tensor = np.concatenate((tensor,) * 3, 0)
        tensor = tensor.reshape((1,) + tensor.shape)
    if tensor.ndim == 4 and tensor.shape[1] == 1:  # single-channel images
        tensor = np.concatenate((tensor,) * 3, 1)

    # A single image needs no grid assembly.
    if tensor.shape[0] == 1:
        return np.squeeze(tensor)

    # Tile the images row by row onto a pad_value canvas.
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height = int(tensor.shape[2] + padding)
    width = int(tensor.shape[3] + padding)
    grid = np.ones((3, height * ymaps + padding, width * xmaps + padding)) * pad_value
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            grid[:, y * height + padding:(y + 1) * height,
                 x * width + padding:(x + 1) * width] = tensor[k]
            k += 1
    return grid
def pre_pillow_float_img_process(float_img):
    """Convert a float CHW image in [0, 1] to a uint8 HWC array for PIL."""
    scaled = (float_img * 255).clip(0, 255)
    return scaled.astype('uint8').transpose(1, 2, 0)
def save_image(tensor, filename, nrow=8, padding=2, pad_value=0):
    """Save a given Tensor into an image file.
    Args:
        tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images by calling ``make_grid``.
        **kwargs: Other arguments are documented in ``make_grid``.
    """
    from PIL import Image
    grid_img = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)
    Image.fromarray(pre_pillow_float_img_process(grid_img)).save(filename)
| 4,525 | 39.053097 | 108 | py |
dynet | dynet-master/examples/variational-autoencoder/basic-image-recon/vae.py | from __future__ import print_function
from utils import load_mnist, make_grid, pre_pillow_float_img_process, save_image
import numpy as np
import argparse
import dynet as dy
import os
if not os.path.exists('results'):
os.makedirs('results')
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--dynet-gpu', action='store_true', default=False,
help='enables DyNet CUDA training')
parser.add_argument('--dynet-gpus', type=int, default=1, metavar='N',
help='number of gpu devices to use')
parser.add_argument('--dynet-seed', type=int, default=None, metavar='N',
help='random seed (default: random inside DyNet)')
parser.add_argument('--dynet-mem', type=int, default=None, metavar='N',
help='allocating memory (default: default of DyNet 512MB)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
train_data = load_mnist('training', './data')
batch_size = args.batch_size
test_data = load_mnist('testing', './data')
def generate_batch_loader(data, batch_size):
    """Yield consecutive full batches of ``data`` as numpy arrays.

    A trailing batch shorter than ``batch_size`` is dropped, matching the
    original behaviour of ignoring the last short batch.
    """
    total = len(data)
    for start in range(0, total - batch_size + 1, batch_size):
        yield np.asarray(data[start:start + batch_size])
class DynetLinear:
    """A minimal fully connected (affine) layer for DyNet: y = W * x + b."""
    def __init__(self, dim_in, dim_out, dyParameterCollection):
        assert(isinstance(dyParameterCollection, dy.ParameterCollection))
        self.dim_in = dim_in
        self.dim_out = dim_out
        # Persistent model parameters registered with the collection.
        self.pW = dyParameterCollection.add_parameters((dim_out, dim_in))
        self.pb = dyParameterCollection.add_parameters((dim_out))
    def __call__(self, x):
        """Apply the affine transform to expression ``x`` in the current graph."""
        assert(isinstance(x, dy.Expression))
        self.W = dy.parameter(self.pW) # add parameters to graph as expressions # m2.add_parameters((8, len(inputs)))
        self.b = dy.parameter(self.pb)
        self.x = x
        return self.W * self.x + self.b
pc = dy.ParameterCollection()
class VAE:
    """Variational autoencoder for 784-dim MNIST vectors with a 20-dim
    latent code (architecture mirrors the classic Kingma & Welling setup)."""
    def __init__(self, dyParameterCollection):
        assert (isinstance(dyParameterCollection, dy.ParameterCollection))
        self.fc1 = DynetLinear(784, 400, dyParameterCollection)
        self.fc21 = DynetLinear(400, 20, dyParameterCollection)   # mean head
        self.fc22 = DynetLinear(400, 20, dyParameterCollection)   # log-variance head
        self.fc3 = DynetLinear(20, 400, dyParameterCollection)
        self.fc4 = DynetLinear(400, 784, dyParameterCollection)
        self.relu = dy.rectify
        self.sigmoid = dy.logistic
        # Toggled externally by train()/test(); controls reparameterization noise.
        self.training = False
    def encode(self, x):
        """Map input ``x`` to the posterior parameters (mu, logvar)."""
        h1 = self.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)
    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * std during training; return mu at eval time."""
        if self.training:
            std = dy.exp(logvar * 0.5)
            eps = dy.random_normal(dim=std.dim()[0], mean=0.0, stddev=1.0)
            return dy.cmult(eps, std) + mu
        else:
            return mu
    def decode(self, z):
        """Map a latent code ``z`` back to a 784-dim image in (0, 1)."""
        h3 = self.relu(self.fc3(z))
        return self.sigmoid(self.fc4(h3))
    def forward(self, x):
        """Full pass: returns (reconstruction, mu, logvar)."""
        assert(isinstance(x, dy.Expression))
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar
model = VAE(pc)
optimizer = dy.AdamTrainer(pc, alpha=1e-3) # alpha: initial learning rate
# # Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
    """VAE objective: reconstruction (binary cross-entropy) plus the KL
    divergence of the approximate posterior from the unit Gaussian prior,
    summed over all elements and the batch."""
    BCE = dy.binary_log_loss(recon_x, x) # equiv to torch.nn.functional.binary_cross_entropy(?,?, size_average=False)
    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * dy.sum_elems(1 + logvar - dy.pow(mu, dy.scalarInput(2)) - dy.exp(logvar))
    return BCE + KLD
def train(epoch):
    """Run one training epoch over ``train_data`` (module-level globals:
    model, optimizer, train_data, batch_size, args)."""
    model.training = True
    train_loss = 0
    train_loader = generate_batch_loader(train_data, batch_size=batch_size)
    for batch_idx, data in enumerate(train_loader):
        # Dynamic construction of a fresh computation graph per batch.
        dy.renew_cg()
        # Images are fed column-wise: (784, batch).
        x = dy.inputTensor(data.reshape(-1, 784).T)
        recon_x, mu, logvar = model.forward(x)
        loss = loss_function(recon_x, x, mu, logvar)
        # Forward
        loss_value = loss.value()
        train_loss += loss_value
        # Backward
        loss.backward()
        optimizer.update()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_data),
                100. * batch_idx / (len(train_data) / batch_size),
                loss_value / len(data)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_data)))
def test(epoch):
    """Evaluate on ``test_data`` and save a reconstruction sample image for
    the first batch (globals: model, test_data, batch_size)."""
    model.training = False
    test_loss = 0
    test_loader = generate_batch_loader(test_data, batch_size=batch_size)
    for i, data in enumerate(test_loader):
        # Dynamic construction of a fresh computation graph per batch.
        dy.renew_cg()
        x = dy.inputTensor(data.reshape(-1, 784).T)
        recon_x, mu, logvar = model.forward(x)
        loss = loss_function(recon_x, x, mu, logvar)
        # Forward
        loss_value = loss.value()
        test_loss += loss_value
        if i == 0:
            # Stack originals over reconstructions for visual comparison.
            n = min(data.shape[0], 8)
            comparison = np.concatenate([data[:n],
                                         recon_x.npvalue().T.reshape(batch_size, 1, 28, 28)[:n]])
            save_image(comparison,
                       'results/reconstruction_' + str(epoch) + '.png', nrow=n)
    test_loss /= len(test_data)
    print('====> Test set loss: {:.4f}'.format(test_loss))
import time
# Wall-clock timings per epoch, for the summary printed at the end.
tictocs = []
for epoch in range(1, args.epochs + 1):
    tic = time.time()
    train(epoch)
    test(epoch)
    # Decode random latent codes to produce unconditional samples.
    sample = dy.inputTensor(np.random.randn(20, 64))
    sample = model.decode(sample)
    save_image(sample.npvalue().T.reshape(64, 1, 28, 28),
               'results/sample_' + str(epoch) + '.png')
    toc = time.time()
    tictocs.append(toc - tic)
print('############\n\n')
print('Total Time Cost:', np.sum(tictocs))
print('Epoch Time Cost', np.average(tictocs), '+-', np.std(tictocs) / np.sqrt(len(tictocs)))
print('\n\n############')
| 6,690 | 31.639024 | 118 | py |
dynet | dynet-master/examples/python-utils/util.py | import mmap
class Vocab:
    """Bidirectional mapping between words and integer ids."""
    def __init__(self, w2i):
        # Copy so later mutation of the caller's dict cannot corrupt the vocab.
        self.w2i = dict(w2i)
        self.i2w = {i: w for w, i in w2i.items()}
    @classmethod
    def from_corpus(cls, corpus):
        """Build a vocabulary from an iterable of token sequences, assigning
        ids in first-seen order."""
        w2i = {}
        for sent in corpus:
            for word in sent:
                w2i.setdefault(word, len(w2i))
        # Fix: return cls(...) rather than the hard-coded Vocab(...) so the
        # alternate constructor also works for subclasses.
        return cls(w2i)
    def size(self):
        """Number of distinct words in the vocabulary."""
        return len(self.w2i)
# This corpus reader can be used when reading a large text file into memory
# can solve an IO bottleneck of training.
# Use it exactly as the regular CorpusReader from rnnlm.py.
class FastCorpusReader:
    """mmap-backed corpus reader: yields one lower-cased, whitespace-split
    line (as bytes tokens) per iteration."""
    def __init__(self, fname):
        self.fname = fname
        self.f = open(fname, 'rb')
    def __iter__(self):
        # The mmap flags below are for Linux/OS-X.
        # On Windows replace prot=mmap.PROT_READ with access=mmap.ACCESS_READ.
        mapped = mmap.mmap(self.f.fileno(), 0, prot=mmap.PROT_READ)
        raw = mapped.readline()
        while raw:
            current = raw
            raw = mapped.readline()
            yield current.lower().strip().split()
class CorpusReader:
    """Iterate over a whitespace-tokenised corpus, one sentence per line."""
    def __init__(self, fname):
        self.fname = fname
    def __iter__(self):
        # Fix: the original used the Python-2-only ``file()`` builtin, which
        # raises NameError on Python 3; open() in a with-block also guarantees
        # the handle is closed when iteration finishes.
        with open(self.fname) as fh:
            for line in fh:
                line = line.strip().split()
                #line = [' ' if x == '' else x for x in line]
                yield line
class CharsCorpusReader:
    """Iterate over a corpus character by character, one line at a time.

    If ``begin`` is given (and truthy) it is prepended to every yielded line.
    """
    def __init__(self, fname, begin=None):
        self.fname = fname
        self.begin = begin
    def __iter__(self):
        prefix = [self.begin] if self.begin else []
        with open(self.fname) as fh:
            for raw_line in fh:
                yield prefix + list(raw_line)
| 1,731 | 28.355932 | 111 | py |
dynet | dynet-master/examples/rnnlm/lstmlm-auto.py | from __future__ import print_function
from collections import defaultdict
import math
import random
import time
import dynet as dy
# path to Mikolov PTB train.txt and valid.txt
FLAGS_train = 'train.txt'
FLAGS_valid = 'valid.txt'
FLAGS_layers = 1
FLAGS_hidden_dim = 128
FLAGS_batch_size = 16
FLAGS_word_dim = 64
def shuffled_infinite_list(lst):
    """Yield the elements of ``lst`` forever, reshuffling the visit order on
    every full pass; each element appears exactly once per pass."""
    # Fix: on Python 3 ``range`` is an immutable lazy sequence, so it must be
    # materialised into a list before random.shuffle can permute it in place
    # (the original raised TypeError under Python 3).
    order = list(range(len(lst)))
    while True:
        random.shuffle(order)
        for i in order:
            yield lst[i]
def read(filename, w2i):
    """Yield each line of ``filename`` as a list of word ids (looked up in
    ``w2i``), terminated by the id of the stop symbol ``</s>``."""
    eos = w2i["</s>"]
    with open(filename, "r") as corpus:
        for raw_line in corpus:
            yield [w2i[tok] for tok in raw_line.strip().split()] + [eos]
class LSTMLM:
    """LSTM language model over word ids. Dimensions come from the
    module-level FLAGS_* constants."""
    def __init__(self, model, vocab_size, start):
        # ``start`` is the id fed as the previous word at sentence start.
        self.start = start
        self.embeddings = model.add_lookup_parameters((vocab_size, FLAGS_word_dim))
        self.rnn = dy.VanillaLSTMBuilder(FLAGS_layers,
                                         FLAGS_word_dim,
                                         FLAGS_hidden_dim,
                                         model)
        # Hidden-to-logits projection and output bias.
        self.h2l = model.add_parameters((vocab_size, FLAGS_hidden_dim))
        self.lb = model.add_parameters(vocab_size)
    # Compute the LM loss for a single sentence.
    def sent_lm_loss(self, sent):
        """Sum of per-token negative log-likelihoods for one sentence."""
        rnn_cur = self.rnn.initial_state()
        losses = []
        prev_word = self.start
        for word in sent:
            x_t = self.embeddings[prev_word]
            rnn_cur = rnn_cur.add_input(x_t)
            logits = dy.affine_transform([self.lb,
                                          self.h2l,
                                          rnn_cur.output()])
            losses.append(dy.pickneglogsoftmax(logits, word))
            prev_word = word
        return dy.esum(losses)
    # "Naively" computed Loss for a minibatch of sentences
    def minibatch_lm_loss(self, sents):
        """Return (summed loss expression, total word count) for a minibatch."""
        sent_losses = [self.sent_lm_loss(sent) for sent in sents]
        minibatch_loss = dy.esum(sent_losses)
        total_words = sum(len(sent) for sent in sents)
        return minibatch_loss, total_words
print("RUN WITH AND WITHOUT --dynet_autobatch=1")
start = time.time()
updates = 100000
w2i = defaultdict(lambda: len(w2i))
start_symbol = w2i["<s>"]
train = list(read(FLAGS_train, w2i))
vocab_size = len(w2i)
valid = list(read(FLAGS_valid, w2i))
assert vocab_size == len(w2i) # Assert that vocab didn't grow.
model = dy.Model()
trainer = dy.AdamTrainer(model)
lm = LSTMLM(model, vocab_size, start_symbol)
print("startup time: %r" % (time.time() - start))
start = time.time()
epoch = all_sents = dev_time = all_words = this_words = this_loss = 0
random_training_instance = shuffled_infinite_list(train)
for updates in xrange(1, updates):
if updates % int(500 / FLAGS_batch_size) == 0:
trainer.status()
train_time = time.time() - start - dev_time
all_words += this_words
print("loss=%.4f, words per second=%.4f" %
(this_loss / this_words, all_words / train_time))
this_loss = this_words = 0
if updates % int(10000 / FLAGS_batch_size) == 0:
dev_start = time.time()
dev_loss = dev_words = 0
for i in xrange(0, len(valid), FLAGS_batch_size):
valid_minibatch = valid[i:i + FLAGS_batch_size]
dy.renew_cg() # Clear existing computation graph.
loss_exp, mb_words = lm.minibatch_lm_loss(valid_minibatch)
dev_loss += loss_exp.scalar_value()
dev_words += mb_words
dev_time = time.time() - dev_start
print("nll=%.4f, ppl=%.4f, words=%r, time=%.4f, word_per_sec=%.4f" % (
dev_loss / dev_words, math.exp(dev_loss / dev_words), dev_words,
dev_time, dev_words / dev_time))
# Compute loss for one training minibatch.
minibatch = [next(random_training_instance)
for _ in xrange(FLAGS_batch_size)]
dy.renew_cg() # Clear existing computation graph.
loss_exp, mb_words = lm.minibatch_lm_loss(minibatch)
this_loss += loss_exp.scalar_value()
this_words += mb_words
all_sents += FLAGS_batch_size
avg_minibatch_loss = loss_exp / len(minibatch)
avg_minibatch_loss.forward()
avg_minibatch_loss.backward()
trainer.update()
cur_epoch = int(all_sents / len(train))
if cur_epoch != epoch:
print("epoch %r finished" % cur_epoch)
epoch = cur_epoch
| 4,126 | 31.242188 | 79 | py |
dynet | dynet-master/examples/rnnlm/rnnlm.py | import dynet as dy
import time
import random
LAYERS = 2
INPUT_DIM = 256 #50 #256
HIDDEN_DIM = 256 # 50 #1024
VOCAB_SIZE = 0
from collections import defaultdict
from itertools import count
import argparse
import sys
import util
class RNNLanguageModel:
    """Character/word-level RNN language model (builder type is pluggable)."""
    def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
        self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
        self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
        # Output projection and bias.
        self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
        self.bias = model.add_parameters((VOCAB_SIZE))
    def save_to_disk(self, filename):
        dy.save(filename, [self.builder, self.lookup, self.R, self.bias])
    def load_from_disk(self, filename):
        # NOTE(review): relies on a module-level ``model`` global — confirm.
        (self.builder, self.lookup, self.R, self.bias) = dy.load(filename, model)
    def build_lm_graph(self, sent):
        """Build and return the summed NLL loss expression for one sentence."""
        dy.renew_cg()
        init_state = self.builder.initial_state()
        errs = [] # will hold expressions
        es=[]
        state = init_state
        for (cw,nw) in zip(sent,sent[1:]):
            # assume word is already a word-id
            x_t = dy.lookup(self.lookup, int(cw))
            state = state.add_input(x_t)
            y_t = state.output()
            r_t = self.bias + (self.R * y_t)
            err = dy.pickneglogsoftmax(r_t, int(nw))
            errs.append(err)
        nerr = dy.esum(errs)
        return nerr
    def predict_next_word(self, sentence):
        """Return the softmax distribution over the next word given a prefix."""
        dy.renew_cg()
        init_state = self.builder.initial_state()
        state = init_state
        for cw in sentence:
            # assume word is already a word-id
            x_t = self.lookup[int(cw)]
            state = state.add_input(x_t)
        y_t = state.output()
        r_t = self.bias + (self.R * y_t)
        prob = dy.softmax(r_t)
        return prob
    def sample(self, first=1, nchars=0, stop=-1):
        """Sample a sequence starting from id ``first`` until ``stop`` is
        drawn or ``nchars`` symbols have been produced."""
        res = [first]
        dy.renew_cg()
        state = self.builder.initial_state()
        cw = first
        while True:
            x_t = self.lookup[cw]
            state = state.add_input(x_t)
            y_t = state.output()
            r_t = self.bias + (self.R * y_t)
            ydist = dy.softmax(r_t)
            dist = ydist.vec_value()
            # Inverse-CDF sampling over the categorical distribution.
            rnd = random.random()
            for i,p in enumerate(dist):
                rnd -= p
                if rnd <= 0: break
            res.append(i)
            cw = i
            if cw == stop: break
            if nchars and len(res) > nchars: break
        return res
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('corpus', help='Path to the corpus file.')
args = parser.parse_args()
train = util.CharsCorpusReader(args.corpus, begin="<s>")
vocab = util.Vocab.from_corpus(train)
VOCAB_SIZE = vocab.size()
model = dy.Model()
trainer = dy.SimpleSGDTrainer(model, learning_rate=1.0)
#lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder)
lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.LSTMBuilder)
train = list(train)
chars = loss = 0.0
for ITER in range(100):
random.shuffle(train)
for i,sent in enumerate(train):
_start = time.time()
if i % 50 == 0:
trainer.status()
if chars > 0: print(loss / chars,)
for _ in range(1):
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
loss = 0.0
chars = 0.0
chars += len(sent)-1
isent = [vocab.w2i[w] for w in sent]
errs = lm.build_lm_graph(isent)
loss += errs.scalar_value()
errs.backward()
trainer.update()
#print "TM:",(time.time() - _start)/len(sent)
print("ITER {}, loss={}".format(ITER, loss))
trainer.status()
lm.save_to_disk("RNNLanguageModel.model")
print("loading the saved model...")
lm.load_from_disk("RNNLanguageModel.model")
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
| 4,334 | 31.593985 | 105 | py |
dynet | dynet-master/examples/rnnlm/rnnlm_transduce.py | # a version rnnlm.py using the transduce() interface.
import dynet as dy
import time
import random
LAYERS = 2
INPUT_DIM = 50 #256
HIDDEN_DIM = 50 #1024
VOCAB_SIZE = 0
import argparse
import sys
import util
try:
from itertools import izip as zip
except ImportError:
pass
class RNNLanguageModel:
    """RNN language model using the batch ``transduce()`` interface for the
    loss computation (sampling still steps incrementally)."""
    def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
        self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
        self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
        # Output projection and bias.
        self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
        self.bias = model.add_parameters((VOCAB_SIZE))
    def BuildLMGraph(self, sent):
        """Build and return the summed NLL loss expression for one sentence,
        feeding all inputs through the RNN at once via transduce()."""
        dy.renew_cg()
        init_state = self.builder.initial_state()
        errs = [] # will hold expressions
        es=[]
        state = init_state
        inputs = [self.lookup[int(cw)] for cw in sent[:-1]]
        expected_outputs = [int(nw) for nw in sent[1:]]
        outputs = state.transduce(inputs)
        r_ts = ((self.bias + (self.R * y_t)) for y_t in outputs)
        errs = [dy.pickneglogsoftmax(r_t, eo) for r_t, eo in zip(r_ts, expected_outputs)]
        nerr = dy.esum(errs)
        return nerr
    def sample(self, first=1, nchars=0, stop=-1):
        """Sample a sequence starting from id ``first`` until ``stop`` is
        drawn or ``nchars`` symbols have been produced."""
        # sampling must use the regular incremental interface.
        res = [first]
        dy.renew_cg()
        state = self.builder.initial_state()
        cw = first
        while True:
            x_t = self.lookup[cw]
            state = state.add_input(x_t)
            y_t = state.output()
            r_t = self.bias + (self.R * y_t)
            ydist = dy.softmax(r_t)
            dist = ydist.vec_value()
            # Inverse-CDF sampling over the categorical distribution.
            rnd = random.random()
            for i,p in enumerate(dist):
                rnd -= p
                if rnd <= 0: break
            res.append(i)
            cw = i
            if cw == stop: break
            if nchars and len(res) > nchars: break
        return res
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('corpus', help='Path to the corpus file.')
args = parser.parse_args()
train = util.CharsCorpusReader(args.corpus, begin="<s>")
vocab = util.Vocab.from_corpus(train)
VOCAB_SIZE = vocab.size()
model = dy.Model()
trainer = dy.SimpleSGDTrainer(model)
builder = dy.SimpleRNNBuilder
# builder = dy.LSTMBuilder
lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=builder)
train = list(train)
chars = loss = 0.0
for ITER in range(100):
random.shuffle(train)
for i,sent in enumerate(train):
_start = time.time()
if i % 50 == 0:
trainer.status()
if chars > 0: print(loss / chars,)
for _ in range(1):
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
loss = 0.0
chars = 0.0
chars += len(sent)-1
isent = [vocab.w2i[w] for w in sent]
errs = lm.BuildLMGraph(isent)
loss += errs.scalar_value()
errs.backward()
trainer.update()
print("ITER",ITER,loss)
trainer.status()
| 3,338 | 30.205607 | 102 | py |
dynet | dynet-master/examples/mnist/mnist-autobatch.py | #! /usr/bin/env python3
import time
import random
import os
import struct
import argparse
import numpy as np
import dynet as dy
# To run this, download the four files from http://yann.lecun.com/exdb/mnist/
# using the --download option.
# Pass the path where the data should be stored (or is already stored)
# to the program with the --path option. You will also want to run
# with --dynet_autobatch=1. To turn on GPU training, run with
# --dynet_gpus=1.
parser = argparse.ArgumentParser()
parser.add_argument("--path", default=".",
help="Path to the MNIST data files (unzipped).")
parser.add_argument("--minibatch_size", default=16,
help="Size of minibatches.")
parser.add_argument("--conv", dest="conv", action="store_true")
parser.add_argument("--download", dest="download", action="store_true",
help="download and extract examples to path")
parser.add_argument("--dynet_autobatch", default=0,
help="Set to 1 to turn on autobatching.")
parser.add_argument("--dynet_gpus", default=0,
help="Set to 1 to train on GPU.")
HIDDEN_DIM = 1024
DROPOUT_RATE = 0.4
# minimally adapted from https://gist.github.com/akesling/5358964
def read_mnist(dataset, path):
    """Yield (label, image) pairs from the raw MNIST idx files in ``path``.

    Images are flattened to length-784 float vectors scaled into [0, 1].

    Args:
        dataset: either "training" or "testing".
        path: directory holding the unzipped MNIST idx files.
    Raises:
        ValueError: if ``dataset`` names an unknown split.
    """
    # Fix: compare strings with ==, not ``is`` — identity of equal string
    # literals is an interpreter detail (CPython >= 3.8 emits a SyntaxWarning).
    if dataset == "training":
        fname_img = os.path.join(path, "train-images-idx3-ubyte")
        fname_lbl = os.path.join(path, "train-labels-idx1-ubyte")
    elif dataset == "testing":
        fname_img = os.path.join(path, "t10k-images-idx3-ubyte")
        fname_lbl = os.path.join(path, "t10k-labels-idx1-ubyte")
    else:
        raise ValueError("dataset must be 'testing' or 'training'")
    # Load everything in some numpy arrays
    with open(fname_lbl, "rb") as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        labels = np.fromfile(flbl, dtype=np.int8)
    with open(fname_img, "rb") as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        images = np.multiply(
            np.fromfile(fimg, dtype=np.uint8).reshape(len(labels), rows*cols),
            1.0 / 255.0)
    get_instance = lambda idx: (labels[idx], images[idx])
    # Create an iterator which returns each image in turn
    for i in range(len(labels)):
        yield get_instance(i)
class MNISTClassify(object):
    """MNIST classifier: an optional two-conv-layer front end (controlled by
    the module-level ``args.conv`` flag) followed by one hidden layer."""
    def __init__(self, m):
        if args.conv:
            # architecture from https://www.tensorflow.org/get_started/mnist/pros
            self.F1 = m.add_parameters((5, 5, 1, 32))
            self.b1 = m.add_parameters((32, ))
            self.F2 = m.add_parameters((5, 5, 32, 64))
            self.b2 = m.add_parameters((64, ))
            input_size = 7 * 7 * 64
        else:
            input_size = 28 * 28
        self.W1 = m.add_parameters((HIDDEN_DIM, input_size))
        self.hbias = m.add_parameters((HIDDEN_DIM, ))
        self.W2 = m.add_parameters((10, HIDDEN_DIM))
    def __call__(self, x, dropout=False):
        """Return the 10-way class logits for input expression ``x``.
        ``dropout`` should be True only during training."""
        if args.conv:
            x = dy.reshape(x, (28, 28, 1))
            x = dy.conv2d_bias(x, self.F1, self.b1, [1, 1], is_valid=False)
            x = dy.rectify(dy.maxpooling2d(x, [2, 2], [2, 2]))
            x = dy.conv2d_bias(x, self.F2, self.b2, [1, 1], is_valid=False)
            x = dy.rectify(dy.maxpooling2d(x, [2, 2], [2, 2])) # 7x7x64
            x = dy.reshape(x, (7 * 7 * 64,))
        h = dy.rectify(self.W1 * x + self.hbias)
        if dropout:
            h = dy.dropout(h, DROPOUT_RATE)
        logits = self.W2 * h
        return logits
def download_examples(path):
    """Download the four gzipped MNIST archives and unpack them into ``path``.

    Fix: each output file is now closed via a ``with`` block — the original
    left one file handle open per archive.
    """
    import gzip
    import urllib.request
    baseurl = "http://yann.lecun.com/exdb/mnist/"
    for elem in ["train-images-idx3-ubyte.gz",
                 "train-labels-idx1-ubyte.gz",
                 "t10k-images-idx3-ubyte.gz",
                 "t10k-labels-idx1-ubyte.gz"]:
        print("downloading " + elem + " ...")
        downloaded = urllib.request.urlopen(baseurl + elem)
        # Strip the ".gz" suffix for the on-disk name, as before.
        with open(os.path.join(path, elem[:-3]), "wb") as outfile:
            outfile.write(gzip.GzipFile(fileobj=downloaded).read())
if __name__ == "__main__":
args = parser.parse_args()
if args.download:
download_examples(args.path)
training = [(lbl, img) for (lbl, img) in read_mnist("training", args.path)]
testing = [(lbl, img) for (lbl, img) in read_mnist("testing", args.path)]
m = dy.Model()
classify = MNISTClassify(m)
sgd = dy.SimpleSGDTrainer(m, learning_rate=0.01)
eloss = None
alpha = 0.05 # smoothing of training loss for reporting
start = time.time()
dev_time = 0
report = args.minibatch_size * 30
dev_report = args.minibatch_size * 600
for epoch in range(50):
random.shuffle(training)
print(("Epoch {} starting".format(epoch+1)))
i = 0
while i < len(training):
dy.renew_cg()
mbsize = min(args.minibatch_size, len(training) - i)
minibatch = training[i:i+mbsize]
losses = []
for lbl, img in minibatch:
x = dy.inputVector(img)
logits = classify(x, dropout=True)
loss = dy.pickneglogsoftmax(logits, lbl)
losses.append(loss)
mbloss = dy.esum(losses) / mbsize
mbloss.backward()
sgd.update()
# eloss is an exponentially smoothed loss.
if eloss is None:
eloss = mbloss.scalar_value()
else:
eloss = mbloss.scalar_value() * alpha + eloss * (1.0 - alpha)
# Do dev evaluation here:
if (i > 0) and (i % dev_report == 0):
confusion = [[0 for _ in range(10)] for _ in range(10)]
correct = 0
dev_start = time.time()
for s in range(0, len(testing), args.minibatch_size):
dy.renew_cg()
e = min(len(testing), s + args.minibatch_size)
minibatch = testing[s:e]
scores = []
for lbl, img in minibatch:
x = dy.inputVector(img)
logits = classify(x)
scores.append((lbl, logits))
# This evaluates all the logits in a batch if autobatching is on.
dy.forward([logits for _, logits in scores])
# now we can retrieve the batch-computed logits cheaply
for lbl, logits in scores:
prediction = np.argmax(logits.npvalue())
if lbl == prediction:
correct += 1
confusion[prediction][lbl] += 1
dev_end = time.time()
acc = float(correct) / len(testing)
dev_time += dev_end - dev_start
print(("Held out accuracy {} ({} instances/sec)".format(
acc, len(testing) / (dev_end - dev_start))))
print(' ' + ''.join(('T'+str(x)).ljust(6) for x in range(10)))
for p, row in enumerate(confusion):
s = 'P' + str(p) + ' '
s += ''.join(str(col).ljust(6) for col in row)
print(s)
if (i > 0) and (i % report == 0):
print(("moving avg loss: {}".format(eloss)))
i += mbsize
end = time.time()
print(("instances per sec: {}".format(
(i + epoch * len(training)) / (end - start - dev_time))))
| 6,853 | 35.26455 | 78 | py |
dynet | dynet-master/examples/mnist/basic-mnist-benchmarks/mnist_dynet_autobatch.py | from __future__ import division
import os
import struct
import argparse
import random
import time
import numpy as np
# import dynet as dy
# import dynet_config
# dynet_config.set_gpu()
import dynet as dy
# First, download the MNIST dataset from the official website and decompress it.
# wget -O - http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz | gunzip > train-images.idx3-ubyte
# wget -O - http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz | gunzip > train-labels.idx1-ubyte
# wget -O - http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz | gunzip > t10k-images.idx3-ubyte
# wget -O - http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz | gunzip > t10k-labels.idx1-ubyte
parser = argparse.ArgumentParser(description='DyNet MNIST Example')
parser.add_argument("--path", type=str, default=".",
help="Path to the MNIST data files (unzipped).")
parser.add_argument('--batch-size', type=int, default=64,
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=20,
help='number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--log-interval', type=int, default=10,
help='how many batches to wait before logging training status')
parser.add_argument("--dynet_autobatch", type=int, default=0,
help="Set to 1 to turn on autobatching.")
parser.add_argument("--dynet_gpus", type=int, default=0,
help="Set to 1 to train on GPU.")
HIDDEN_DIM = 1024
DROPOUT_RATE = 0.4
# Adapted from https://gist.github.com/akesling/5358964
def read(dataset, path):
    """Yield (label, 28x28 float image) pairs from the raw MNIST idx files.

    Args:
        dataset: "training" or "testing".
        path: directory containing the unzipped idx files.
    Raises:
        ValueError: for any other ``dataset`` value.
    """
    # Fix: compare strings with ==, not ``is`` — identity of equal string
    # literals is an interpreter detail (CPython >= 3.8 emits a SyntaxWarning).
    if dataset == "training":
        fname_img = os.path.join(path, "train-images.idx3-ubyte")
        fname_lbl = os.path.join(path, "train-labels.idx1-ubyte")
    elif dataset == "testing":
        fname_img = os.path.join(path, "t10k-images.idx3-ubyte")
        fname_lbl = os.path.join(path, "t10k-labels.idx1-ubyte")
    else:
        raise ValueError("dataset must be 'training' or 'testing'")
    with open(fname_lbl, 'rb') as flbl:
        _, _ = struct.unpack(">II", flbl.read(8))
        lbl = np.fromfile(flbl, dtype=np.int8)
    with open(fname_img, 'rb') as fimg:
        _, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = np.multiply(np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols), 1.0/255.0)
    get_img = lambda idx: (lbl[idx], img[idx])
    for i in range(len(lbl)):
        yield get_img(i)
class mnist_network(object):
    """Two-conv-layer + one-hidden-layer MNIST classifier in DyNet."""
    def __init__(self, m):
        self.pConv1 = m.add_parameters((5, 5, 1, 32))
        self.pB1 = m.add_parameters((32, ))
        self.pConv2 = m.add_parameters((5, 5, 32, 64))
        self.pB2 = m.add_parameters((64, ))
        self.pW1 = m.add_parameters((HIDDEN_DIM, 7*7*64))
        self.pB3 = m.add_parameters((HIDDEN_DIM, ))
        self.pW2 = m.add_parameters((10, HIDDEN_DIM))
    def __call__(self, inputs, dropout=False):
        """Return the 10-way class logits for a 28x28 input image.
        ``dropout`` should be True only during training."""
        x = dy.inputTensor(inputs)
        conv1 = dy.parameter(self.pConv1)
        b1 = dy.parameter(self.pB1)
        x = dy.conv2d_bias(x, conv1, b1, [1, 1], is_valid=False)
        x = dy.rectify(dy.maxpooling2d(x, [2, 2], [2, 2]))
        conv2 = dy.parameter(self.pConv2)
        b2 = dy.parameter(self.pB2)
        x = dy.conv2d_bias(x, conv2, b2, [1, 1], is_valid=False)
        x = dy.rectify(dy.maxpooling2d(x, [2, 2], [2, 2]))
        x = dy.reshape(x, (7*7*64, 1))
        w1 = dy.parameter(self.pW1)
        b3 = dy.parameter(self.pB3)
        h = dy.rectify(w1*x+b3)
        if dropout:
            h = dy.dropout(h, DROPOUT_RATE)
        w2 = dy.parameter(self.pW2)
        output = w2*h
        # output = dy.softmax(w2*h)
        return output
    def create_network_return_loss(self, inputs, expected_output, dropout=False):
        """Return the NLL loss expression for one (image, label) pair."""
        out = self(inputs, dropout)
        loss = dy.pickneglogsoftmax(out, expected_output)
        # loss = -dy.log(dy.pick(out, expected_output))
        return loss
    def create_network_return_best(self, inputs, dropout=False):
        """Return the predicted class index for one image."""
        out = self(inputs, dropout)
        out = dy.softmax(out)
        return np.argmax(out.npvalue())
        # return np.argmax(out.npvalue())
args = parser.parse_args()
train_data = [(lbl, img) for (lbl, img) in read("training", args.path)]
test_data = [(lbl, img) for (lbl, img) in read("testing", args.path)]
m = dy.ParameterCollection()
network = mnist_network(m)
trainer = dy.SimpleSGDTrainer(m, learning_rate=args.lr)
def train(epoch):
    """Run one training epoch over ``train_data`` (module-level globals:
    network, trainer, train_data, args), logging progress and epoch time."""
    random.shuffle(train_data)
    i = 0
    epoch_start = time.time()
    while i < len(train_data):
        # One computation graph per minibatch; autobatching (if enabled)
        # batches the per-example losses built below.
        dy.renew_cg()
        losses = []
        for lbl, img in train_data[i:i+args.batch_size]:
            loss = network.create_network_return_loss(img, lbl, dropout=True)
            losses.append(loss)
        mbloss = dy.average(losses)
        if (int(i/args.batch_size)) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, i, len(train_data),
                100. * i/len(train_data), mbloss.value()))
        mbloss.backward()
        trainer.update()
        i += args.batch_size
    epoch_end = time.time()
    print("{} s per epoch".format(epoch_end-epoch_start))
def test():
    """Evaluate on the whole ``test_data`` set (globals: network, test_data),
    printing average loss and accuracy. Builds one large graph for all
    test examples."""
    correct = 0
    dy.renew_cg()
    losses = []
    for lbl, img in test_data:
        losses.append(network.create_network_return_loss(img, lbl, dropout=False))
        if lbl == network.create_network_return_best(img, dropout=False):
            correct += 1
    mbloss = dy.average(losses)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        mbloss.value(), correct, len(test_data),
        100. * correct / len(test_data)))
for epoch in range(1, args.epochs + 1):
train(epoch)
test()
# m.save("/tmp/tmp.model")
| 6,044 | 38.509804 | 106 | py |
dynet | dynet-master/examples/mnist/basic-mnist-benchmarks/mnist_pytorch.py | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import time
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=10000, metavar='N',
help='input batch size for testing (default: 10000)')
parser.add_argument('--epochs', type=int, default=20, metavar='N',
help='number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
# parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
# help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
class Net(nn.Module):
    """MNIST classifier: two 5x5 conv layers (each followed by 2x2 max-pool
    and ReLU) and two fully connected layers; outputs log-probabilities."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        self.fc1 = nn.Linear(7*7*64, 1024)
        self.fc2 = nn.Linear(1024, 10, bias=False)
    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 7*7*64)
        x = F.relu(self.fc1(x))
        # Fix: functional dropout defaults to training=True, so the original
        # applied dropout even under model.eval(); tie it to self.training.
        x = F.dropout(x, 0.4, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
model = Net()
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr) # , momentum=args.momentum)
def train(epoch):
    """Run one training epoch over ``train_loader`` (globals: model,
    optimizer, args, train_loader), logging progress and epoch time."""
    model.train()
    epoch_start = time.time()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # Fix: ``.item()`` replaces ``loss.data[0]`` — indexing a 0-dim
            # tensor was removed in PyTorch >= 0.5 and raises IndexError.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    epoch_end = time.time()
    print("{} s per epoch".format(epoch_end-epoch_start))
def test():
    """Evaluate the model on `test_loader`; print average loss and accuracy.

    Fixes for modern PyTorch (>=0.4): ``volatile=True`` is gone (use
    ``torch.no_grad()``), ``size_average=False`` became
    ``reduction='sum'``, and 0-dim tensors are read with ``.item()``
    instead of ``.data[0]``.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # sum up batch loss; averaged over the whole dataset below
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # index of the max log-probability = predicted class
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).long().cpu().sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Main loop: one full training pass per epoch, then a test-set evaluation.
for epoch in range(1, args.epochs + 1):
    train(epoch)
    test()
| 4,645 | 38.372881 | 95 | py |
dynet | dynet-master/examples/mnist/basic-mnist-benchmarks/mnist_dynet_minibatch.py | from __future__ import division
import os
import struct
import argparse
import random
import time
import numpy as np
# import dynet as dy
# import dynet_config
# dynet_config.set_gpu()
import dynet as dy
# First, download the MNIST dataset from the official website and decompress it.
# wget -O - http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz | gunzip > train-images.idx3-ubyte
# wget -O - http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz | gunzip > train-labels.idx1-ubyte
# wget -O - http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz | gunzip > t10k-images.idx3-ubyte
# wget -O - http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz | gunzip > t10k-labels.idx1-ubyte
# CLI flags; the dynet_* flags are consumed by DyNet itself at import time.
parser = argparse.ArgumentParser(description='DyNet MNIST Example')
parser.add_argument("--path", type=str, default=".",
                    help="Path to the MNIST data files (unzipped).")
parser.add_argument('--batch-size', type=int, default=64,
                    help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=20,
                    help='number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=0.01,
                    help='learning rate (default: 0.01)')
parser.add_argument('--log-interval', type=int, default=10,
                    help='how many batches to wait before logging training status')
parser.add_argument("--dynet_autobatch", type=int, default=0,
                    help="Set to 1 to turn on autobatching.")
parser.add_argument("--dynet_gpus", type=int, default=0,
                    help="Set to 1 to train on GPU.")
# Network hyper-parameters shared by mnist_network below.
HIDDEN_DIM = 1024
DROPOUT_RATE = 0.4
# Adapted from https://gist.github.com/akesling/5358964
def read(dataset, path):
    """Yield (label, image) pairs from the raw MNIST idx files under *path*.

    dataset -- "training" or "testing"; selects which pair of files to read.
    Labels are int8; images are float arrays scaled into [0, 1].
    Raises ValueError for any other dataset name (on the first iteration,
    since this is a generator).

    Fix: the original compared strings with ``is`` (object identity, not
    equality) -- it only worked through CPython interning and emits a
    SyntaxWarning on Python >= 3.8.  Use ``==``.
    """
    if dataset == "training":
        fname_img = os.path.join(path, "train-images.idx3-ubyte")
        fname_lbl = os.path.join(path, "train-labels.idx1-ubyte")
    elif dataset == "testing":
        fname_img = os.path.join(path, "t10k-images.idx3-ubyte")
        fname_lbl = os.path.join(path, "t10k-labels.idx1-ubyte")
    else:
        raise ValueError("dataset must be 'training' or 'testing'")
    with open(fname_lbl, 'rb') as flbl:
        # idx label header: magic number + item count (both big-endian).
        _, _ = struct.unpack(">II", flbl.read(8))
        lbl = np.fromfile(flbl, dtype=np.int8)
    with open(fname_img, 'rb') as fimg:
        # idx image header: magic, count, rows, cols.
        _, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = np.multiply(np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols), 1.0/255.0)
    for i in range(len(lbl)):
        yield lbl[i], img[i]
class mnist_network(object):
    """LeNet-style MNIST classifier built from raw DyNet parameters:
    two 5x5 conv + 2x2 max-pool stages (32 then 64 channels), a dense
    hidden layer of HIDDEN_DIM units, and a 10-way linear output."""
    def __init__(self, m):
        # Conv filter shapes are (height, width, in_channels, out_channels).
        self.pConv1 = m.add_parameters((5, 5, 1, 32))
        self.pB1 = m.add_parameters((32, ))
        self.pConv2 = m.add_parameters((5, 5, 32, 64))
        self.pB2 = m.add_parameters((64, ))
        # After two 2x2 poolings a 28x28 image is 7x7 with 64 channels.
        self.pW1 = m.add_parameters((HIDDEN_DIM, 7*7*64))
        self.pB3 = m.add_parameters((HIDDEN_DIM, ))
        self.pW2 = m.add_parameters((10, HIDDEN_DIM))
    def __call__(self, inputs, dropout=False):
        # `inputs` holds a whole minibatch; DyNet keeps the batch as the
        # trailing dimension of the expression.
        x = dy.inputTensor(inputs, batched=True)
        batchsize = x.dim()[-1]
        conv1 = dy.parameter(self.pConv1)
        b1 = dy.parameter(self.pB1)
        # is_valid=False gives "same" padding, preserving spatial size.
        x = dy.conv2d_bias(x, conv1, b1, [1, 1], is_valid=False)
        x = dy.rectify(dy.maxpooling2d(x, [2, 2], [2, 2]))
        conv2 = dy.parameter(self.pConv2)
        b2 = dy.parameter(self.pB2)
        x = dy.conv2d_bias(x, conv2, b2, [1, 1], is_valid=False)
        x = dy.rectify(dy.maxpooling2d(x, [2, 2], [2, 2]))
        # Flatten to a column vector per batch element.
        x = dy.reshape(x, (7*7*64, 1), batch_size=batchsize)
        w1 = dy.parameter(self.pW1)
        b3 = dy.parameter(self.pB3)
        h = dy.rectify(w1*x+b3)
        if dropout:
            h = dy.dropout(h, DROPOUT_RATE)
        w2 = dy.parameter(self.pW2)
        # Unnormalized logits; (log-)softmax is applied by the callers.
        output = w2*h
        # output = dy.softmax(w2*h)
        return output
    def create_network_return_loss(self, inputs, expected_output, dropout=False):
        """Per-example negative log-likelihood over the batch."""
        out = self(inputs, dropout)
        loss = dy.pickneglogsoftmax_batch(out, expected_output)
        # loss = -dy.log(dy.pick(out, expected_output))
        return loss
    def create_network_return_best(self, inputs, dropout=False):
        """Argmax class per batch element (numpy array)."""
        out = self(inputs, dropout)
        out = dy.softmax(out)
        return np.argmax(out.npvalue(), 0)
        # return np.argmax(out.npvalue())
# return np.argmax(out.npvalue())
args = parser.parse_args()
# Materialize both splits fully in memory as (label, image) pairs.
train_data = [(lbl, img) for (lbl, img) in read("training", args.path)]
test_data = [(lbl, img) for (lbl, img) in read("testing", args.path)]
m = dy.ParameterCollection()
network = mnist_network(m)
trainer = dy.SimpleSGDTrainer(m, learning_rate=args.lr)
def train(epoch):
    """One pass over the (shuffled) training data in minibatches."""
    random.shuffle(train_data)
    i = 0
    epoch_start = time.time()
    while i < len(train_data):
        # Each minibatch gets a fresh computation graph.
        dy.renew_cg()
        lbls = []
        imgs = []
        for lbl, img in train_data[i:i+args.batch_size]:
            lbls.append(lbl)
            imgs.append(img)
        losses = network.create_network_return_loss(imgs, lbls, dropout=True)
        loss = dy.mean_batches(losses)
        # Log every `log_interval` minibatches; loss.value() also forces
        # the forward pass before backward() below.
        if (int(i/args.batch_size)) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, i, len(train_data),
                100. * i/len(train_data), loss.value()))
        loss.backward()
        trainer.update()
        i += args.batch_size
    epoch_end = time.time()
    print("{} s per epoch".format(epoch_end-epoch_start))
def test():
    """Evaluate on the full test set in one batched forward pass."""
    lbls = []
    imgs = []
    for lbl, img in test_data:
        lbls.append(lbl)
        imgs.append(img)
    dy.renew_cg()
    losses = network.create_network_return_loss(imgs, lbls, dropout=False)
    loss = dy.mean_batches(losses)
    predicts = network.create_network_return_best(imgs, dropout=False)
    # NOTE(review): predicts appears to carry a leading singleton axis from
    # the (10, 1)-shaped network output, so predicts[0] is the per-example
    # argmax vector -- confirm against out.npvalue()'s shape.
    correct = np.sum(lbls == predicts[0])
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        loss.value(), correct, len(test_data),
        100. * correct / len(test_data)))
# Train for the requested number of epochs, evaluating after each one.
for epoch in range(1, args.epochs + 1):
    train(epoch)
    test()
# m.save("/tmp/tmp.model")
| 6,215 | 38.341772 | 106 | py |
dynet | dynet-master/examples/tensorboard/rnnlm-batch.py | import dynet as dy
import time
import random
from pycrayon import CrayonClient
LAYERS = 2
INPUT_DIM = 256 #50 #256
HIDDEN_DIM = 256 # 50 #1024
VOCAB_SIZE = 0
MB_SIZE = 50 # mini batch size
import argparse
from collections import defaultdict
from itertools import count
import sys
import util
class RNNLanguageModel:
    """Character-level RNN language model trained on minibatches.

    Fix: ``BuildLMGraph`` called the bare name ``lookup_batch``, which is
    never imported (the file only has ``import dynet as dy``) and raises
    NameError at runtime; it must be ``dy.lookup_batch``.
    """
    def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
        # Char-level LSTM (layers=2, input=256, hidden=128, model)
        self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
        # Lookup parameters for word embeddings
        self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
        # Softmax weights/biases on top of LSTM outputs
        self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
        self.bias = model.add_parameters((VOCAB_SIZE))
    # Build the language model graph
    def BuildLMGraph(self, sents):
        """Return (summed batch loss, #real chars) for a batch of sentences.

        Sentences are assumed sorted by decreasing length; shorter ones are
        padded with "<s>" ids and their loss terms are masked out.
        """
        dy.renew_cg()
        # initialize the RNN
        init_state = self.builder.initial_state()
        # parameters -> expressions
        R = dy.parameter(self.R)
        bias = dy.parameter(self.bias)
        S = vocab.w2i["<s>"]
        # get the cids and masks for each step
        tot_chars = 0
        cids = []
        masks = []
        for i in range(len(sents[0])):
            cids.append([(vocab.w2i[sent[i]] if len(sent) > i else S) for sent in sents])
            mask = [(1 if len(sent)>i else 0) for sent in sents]
            masks.append(mask)
            tot_chars += sum(mask)
        # start the rnn with "<s>"
        init_ids = cids[0]
        # FIX: was bare `lookup_batch(...)` (NameError) -- qualify with dy.
        s = init_state.add_input(dy.lookup_batch(self.lookup, init_ids))
        losses = []
        # feed char vectors into the RNN and predict the next char
        for cid, mask in zip(cids[1:], masks[1:]):
            score = dy.affine_transform([bias, R, s.output()])
            loss = dy.pickneglogsoftmax_batch(score, cid)
            # mask the loss if at least one sentence is shorter
            if mask[-1] != 1:
                mask_expr = dy.inputVector(mask)
                mask_expr = dy.reshape(mask_expr, (1,), len(sents))
                loss = loss * mask_expr
            losses.append(loss)
            # update the state of the RNN
            cemb = dy.lookup_batch(self.lookup, cid)
            s = s.add_input(cemb)
        return dy.sum_batches(dy.esum(losses)), tot_chars
    def sample(self, first=1, nchars=0, stop=-1):
        """Sample a character-id sequence, starting from id *first*.

        Stops at id *stop* or when the sequence exceeds *nchars* (if set).
        """
        res = [first]
        dy.renew_cg()
        state = self.builder.initial_state()
        R = dy.parameter(self.R)
        bias = dy.parameter(self.bias)
        cw = first
        while True:
            x_t = dy.lookup(self.lookup, cw)
            state = state.add_input(x_t)
            y_t = state.output()
            r_t = bias + (R * y_t)
            ydist = dy.softmax(r_t)
            dist = ydist.vec_value()
            # inverse-CDF sampling from the softmax distribution
            rnd = random.random()
            for i,p in enumerate(dist):
                rnd -= p
                if rnd <= 0: break
            res.append(i)
            cw = i
            if cw == stop: break
            if nchars and len(res) > nchars: break
        return res
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('corpus', help='Path to the corpus file.')
    parser.add_argument('crayserver', help='Server location for crayon.')
    parser.add_argument('expname', help='Experiment name')
    args = parser.parse_args()
    # Connect to the server
    cc = CrayonClient(hostname=args.crayserver)
    #Create a new experiment
    myexp = cc.create_experiment(args.expname)
    train = util.CharsCorpusReader(args.corpus, begin="<s>")
    vocab = util.Vocab.from_corpus(train)
    VOCAB_SIZE = vocab.size()
    model = dy.ParameterCollection()
    trainer = dy.SimpleSGDTrainer(model)
    #lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder)
    lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.LSTMBuilder)
    train = list(train)
    # Sort training sentences in descending order and count minibatches
    train.sort(key=lambda x: -len(x))
    train_order = [x*MB_SIZE for x in range(int((len(train)-1)/MB_SIZE + 1))]
    # Perform training
    i = 0
    chars = loss = 0.0
    for ITER in range(100):
        random.shuffle(train_order)
        #_start = time.time()
        for sid in train_order:
            i += 1
            #if i % int(50) == 0:
            # Before every minibatch: report per-char loss and print a sample.
            trainer.status()
            if chars > 0: print(loss / chars,)
            for _ in range(1):
                samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
                print("".join([vocab.i2w[c] for c in samp]).strip())
            loss = 0.0
            chars = 0.0
            # train on the minibatch
            errs, mb_chars = lm.BuildLMGraph(train[sid: sid + MB_SIZE])
            loss += errs.scalar_value()
            # Add a scalar value to the experiment for the set of data points named loss evolution
            myexp.add_scalar_value("lossevolution", loss)
            chars += mb_chars
            errs.backward()
            trainer.update()
        #print "TM:",(time.time() - _start)/chars
        print("ITER",ITER,loss)
        #print(loss / chars,)
        #print "TM:",(time.time() - _start)/len(train)
        trainer.status()
    # To save the experiment
    filename = myexp.to_zip()
    print("Save tensorboard experiment at {}".format(filename))
| 5,527 | 32.707317 | 105 | py |
dynet | dynet-master/examples/tensorboard/util.py | import mmap
class Vocab:
    """Bidirectional word<->index mapping."""

    def __init__(self, w2i):
        # Defensive copy; build the reverse map alongside it.
        self.w2i = dict(w2i)
        self.i2w = {index: word for word, index in w2i.items()}

    @classmethod
    def from_corpus(cls, corpus):
        """Assign consecutive ids to words in first-seen corpus order."""
        mapping = {}
        for sentence in corpus:
            for token in sentence:
                if token not in mapping:
                    mapping[token] = len(mapping)
        return Vocab(mapping)

    def size(self):
        """Number of distinct words in the vocabulary."""
        return len(self.w2i)
#This corpus reader can be used when reading large text file into a memory can solve IO bottleneck of training.
#Use it exactly as the regular CorpusReader from the rnnlm.py
class FastCorpusReader:
    """mmap-backed corpus reader for large files; yields lowercased,
    whitespace-split byte-token lists, one line per iteration.

    NOTE(review): the handle opened in __init__ and the mmap created per
    __iter__ are never closed -- fine for a single long-lived training
    reader, but worth confirming before repeated/short-lived use.
    """
    def __init__(self, fname):
        self.fname = fname
        self.f = open(fname, 'rb')
    def __iter__(self):
        #This usage of mmap is for a Linux\OS-X
        #For Windows replace prot=mmap.PROT_READ with access=mmap.ACCESS_READ
        m = mmap.mmap(self.f.fileno(), 0, prot=mmap.PROT_READ)
        data = m.readline()
        while data:
            line = data
            data = m.readline()
            # bytes in, bytes out: lowercase then split on whitespace
            line = line.lower()
            line = line.strip().split()
            yield line
class CorpusReader:
    """Iterate over a whitespace-tokenized corpus, one sentence per line.

    Fix: the original called the Python 2 builtin ``file(...)``, which is a
    NameError on Python 3, and never closed the handle.  Use ``open`` in a
    ``with`` block instead; the yielded token lists are unchanged.
    """
    def __init__(self, fname):
        self.fname = fname
    def __iter__(self):
        with open(self.fname) as f:
            for line in f:
                line = line.strip().split()
                #line = [' ' if x == '' else x for x in line]
                yield line
class CharsCorpusReader:
    """Iterate over a corpus character by character, one line at a time.

    Each yielded item is the list of characters of one line (including the
    trailing newline), optionally preceded by the *begin* marker token.
    """

    def __init__(self, fname, begin=None):
        self.fname = fname
        self.begin = begin  # optional sentinel prepended to every line

    def __iter__(self):
        marker = self.begin
        with open(self.fname) as handle:
            for raw in handle:
                chars = list(raw)
                if marker:
                    chars = [marker] + chars
                yield chars
| 1,731 | 28.355932 | 111 | py |
dynet | dynet-master/examples/treelstm/main.py | from __future__ import print_function
import dynet as dy
dyparams = dy.DynetParams()
dyparams.from_args()
import sys
import time
import os
import argparse
import warnings
import zipfile
from six.moves import urllib
from model import TreeLSTMClassifier
from utils import get_embeds, acc_eval
from scheduler import Scheduler
from dataloader import DataLoader
DATA_URL='https://github.com/zhiyong1997/large-repo/raw/master/packed_data_and_model.zip'
data_dir = 'trees'
glove_path = 'glove_filtered.txt'
def maybe_download_and_extract():
    """Download and extract processed data and embeddings."""
    dest_directory = '.'
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    # Download only if the zip is not already present.
    if not os.path.exists(filepath):
        # urlretrieve progress hook: count = blocks fetched so far.
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'trees')
    # Extract only once; the 'trees/' directory marks a completed extraction.
    if not os.path.exists(extracted_dir_path):
        zip_ref = zipfile.ZipFile(filepath, 'r')
        zip_ref.extractall(dest_directory)
        zip_ref.close()
def establish_args():
    """Parse and validate CLI arguments; prepare the save directories.

    Returns the parsed argparse.Namespace.  Forces wembed_size to 300 when
    --use_glove is set, and raises ValueError on an unknown --mode or a
    missing --model_meta_file in test mode.
    """
    parser = argparse.ArgumentParser()
    # dynet global setting
    parser.add_argument("--dynet-seed", default=0, type=int)
    parser.add_argument("--dynet-mem", default=512, type=int)
    parser.add_argument("--dynet-gpus", default=0, type=int)
    parser.add_argument("--dynet-autobatch", default=0, type=int)
    # control parameters
    parser.add_argument('--mode', default='train', help='available modes: [train, test]')
    parser.add_argument('--model_meta_file', default=None, type=str)
    # scheduler parameters
    parser.add_argument('--trainer', default='AdagradTrainer', help='trainer name in dynet')
    parser.add_argument('--sparse', default=1, type=int, help='sparse update 0/1')
    parser.add_argument('--learning_rate_param', default=0.05, type=float)
    parser.add_argument('--learning_rate_embed', default=0.005, type=float)
    parser.add_argument('--save_dir', default='saved_models')
    parser.add_argument('--batch_size', default=25, type=int)
    parser.add_argument('--regularization_strength', default=1e-4, type=float)
    # model parameters
    parser.add_argument('--use_glove', default=False, action='store_true', help='Use glove vectors or not.')
    parser.add_argument('--dropout_rate', default=0.3, type=float)
    parser.add_argument('--wembed_size', default=300, type=int, help='embedding size')
    parser.add_argument('--hidden_size', default=150, type=int, help='hidden size')
    args = parser.parse_args()
    # check and ensure feasibility
    if args.use_glove and args.wembed_size != 300:
        warnings.warn('Warning: word embedding size must be 300 when using glove, auto adjusted.')
        args.wembed_size = 300
    if args.mode not in ['train', 'test']:
        raise ValueError('Wrong mode, [train, test] available now')
    if args.mode == 'test':
        if args.model_meta_file is None:
            raise ValueError("Missing model meta file to load")
    else:
        # train mode: make sure the meta/param/embed save dirs exist
        meta_path = os.path.join(args.save_dir, 'meta')
        param_path = os.path.join(args.save_dir, 'param')
        embed_path = os.path.join(args.save_dir, 'embed')
        if not os.path.exists(meta_path): os.makedirs(meta_path)
        if not os.path.exists(param_path): os.makedirs(param_path)
        if not os.path.exists(embed_path): os.makedirs(embed_path)
    return args
# ---- script body: fetch data, parse args, load splits, train/evaluate ----
maybe_download_and_extract()
start = time.time()
args = establish_args()
# Optimizer settings consumed by Scheduler.
scheduler_params = {
    'trainer': args.trainer,
    'sparse': args.sparse == 1,
    'learning_rate_param': args.learning_rate_param,
    'learning_rate_embed': args.learning_rate_embed,
    'learning_rate_decay': 0.99,
    'save_dir': args.save_dir,
    'batch_size': args.batch_size,
    'regularization_strength': args.regularization_strength
}
# Architecture settings consumed by TreeLSTMClassifier.
model_params = {
    'wembed_size': args.wembed_size,
    'hidden_size': args.hidden_size,
    'dropout_rate': args.dropout_rate
}
train = DataLoader(os.path.join(data_dir, 'train.txt'))
dev = DataLoader(os.path.join(data_dir, 'dev.txt'))
test = DataLoader(os.path.join(data_dir, 'test.txt'))
word_embed, w2i = get_embeds(glove_path)
if not args.use_glove: word_embed = None
print("startup time: %r" % (time.time() - start))
def exec_train(model_params, scheduler_params):
    """Train a fresh classifier; return (best dev acc, saved meta path)."""
    model = TreeLSTMClassifier(n_classes=5, w2i=w2i, word_embed=word_embed,
                               params=model_params)
    scheduler = Scheduler(model, train, dev, scheduler_params)
    return scheduler.exec_train()
if args.mode == 'train':
    acc, model_meta_file = exec_train(model_params, scheduler_params)
else:
    model_meta_file = args.model_meta_file
def eval_model(model_meta_file):
    """Reload the saved model from its meta file and report test accuracy."""
    print('model_meta_file {}'.format(model_meta_file))
    model = TreeLSTMClassifier(n_classes=5, w2i=w2i, word_embed=word_embed,
                               params=model_params, model_meta_file=model_meta_file)
    acc = acc_eval(test, model)
    print('test acc %.4f' % acc)
eval_model(model_meta_file)
| 5,324 | 35.472603 | 108 | py |
dynet | dynet-master/examples/treelstm/dataloader.py | import re
import codecs
import random
from collections import Counter
def read_dataset(filename):
    """Parse one s-expression tree per line of *filename* into Tree objects.

    Fix: the original passed an open ``codecs.open`` handle to a list
    comprehension and never closed it (resource leak).  Reading inside a
    ``with`` block closes the file; the returned list is unchanged.
    """
    with codecs.open(filename, "r") as f:
        return [Tree.from_sexpr(line.strip()) for line in f]
def get_vocabs(trees):
    """Collect the leaf-word vocabulary over *trees*.

    Returns (w2i, words): index 0 is reserved for the "_UNK_" token and
    the remaining words keep first-seen corpus order.
    """
    label_counts = Counter()
    word_counts = Counter()
    for t in trees:
        label_counts.update(node.label for node in t.nonterms())
        word_counts.update(leaf.label for leaf in t.leaves())
    # Every counted word has count >= 1, so the threshold keeps them all;
    # retained to mirror the original behaviour exactly.
    words = ["_UNK_"]
    words.extend(w for w, c in word_counts.items() if c > 0)
    w2i = {word: idx for idx, word in enumerate(words)}
    return w2i, words
def _tokenize_sexpr(s):
    """Split an s-expression string into '(' / ')' / atom tokens."""
    pattern = re.compile(r" +|[()]|[^ ()]+")
    return [m.group(0) for m in pattern.finditer(s) if m.group(0)[0] != " "]
def _within_bracket(toks):
    """Consume tokens following an opening '(' and build that subtree."""
    label = next(toks)
    children = []
    for tok in toks:
        if tok == "(":
            children.append(_within_bracket(toks))
        elif tok == ")":
            return Tree(label, children)
        else:
            children.append(Tree(tok, None))
    # ran out of tokens before the matching ')'
    raise RuntimeError('Error Parsing sexpr string')
class Tree(object):
    """An n-ary tree parsed from an s-expression.

    Internal nodes carry integer labels; leaves keep the raw token string
    (children is None for leaves).
    """
    def __init__(self, label, children=None):
        self.children = children
        self.label = label if children is None else int(label)
    @staticmethod
    def from_sexpr(string):
        """Parse a string like "(2 (1 word) ...)" into a Tree."""
        tokens = iter(_tokenize_sexpr(string))
        if next(tokens) != "(":
            raise RuntimeError('Error Parsing sexpr string')
        return _within_bracket(tokens)
    def __str__(self):
        if self.children is None:
            return self.label
        rendered = " ".join(str(child) for child in self.children)
        return "[%s %s]" % (self.label, rendered)
    def isleaf(self):
        return self.children is None
    def leaves_iter(self):
        """Yield leaf nodes left-to-right."""
        if self.isleaf():
            yield self
            return
        for child in self.children:
            for leaf in child.leaves_iter():
                yield leaf
    def leaves(self):
        return list(self.leaves_iter())
    def nonterms_iter(self):
        """Yield internal nodes in pre-order."""
        if self.isleaf():
            return
        yield self
        for child in self.children:
            for node in child.nonterms_iter():
                yield node
    def nonterms(self):
        return list(self.nonterms_iter())
class DataLoader(object):
    """Shuffling iterator over the parsed trees of one dataset split."""
    def __init__(self, datapath):
        self.data = read_dataset(datapath)
        self.n_samples = len(self.data)
        self.reset()
    def reset(self, shuffle=True):
        """Rewind the cursor to the start, optionally reshuffling."""
        self.idx = 0
        if shuffle: random.shuffle(self.data)
    def __iter__(self):
        # The cursor advances only after each yield, so a paused iteration
        # resumes exactly where it stopped.
        while self.idx < self.n_samples:
            yield self.data[self.idx]
            self.idx += 1
    def batches(self, batch_size=25):
        """Yield successive slices of at most *batch_size* trees."""
        while self.idx < self.n_samples:
            yield self.data[self.idx: self.idx + batch_size]
            self.idx += batch_size
| 2,717 | 26.454545 | 87 | py |
dynet | dynet-master/examples/treelstm/utils.py | import codecs
import numpy as np
import dynet as dy
def acc_eval(dataset, model):
    """Fraction of trees in *dataset* whose root label the model predicts."""
    dataset.reset(shuffle=False)
    good = bad = 0.0
    for tree in dataset:
        # fresh computation graph per example
        dy.renew_cg()
        scores = model.predict_for_tree(tree, decorate=False, training=False)
        predicted = np.argmax(scores)
        if predicted == tree.label:
            good += 1
        else:
            bad += 1
    return good / (good + bad)
def get_embeds(embed_path, dim=300):
    """Load GloVe-style word vectors from *embed_path*.

    Returns (word_embeds, w2i): an (n_words, dim) array whose row 0 is a
    random "_UNK_" vector, plus a word->row-index map.  The PTB bracket
    tokens -LRB-/-RRB- are aliased to the plain ( and ) entries, which
    must therefore be present in the file.

    *dim* generalizes the previously hard-coded 300-dimensional UNK
    vector; it must match the dimensionality of the file's vectors.
    The default keeps the original behaviour.
    """
    word_embeds, w2i = [np.random.randn(dim)], {'_UNK_': 0}
    with codecs.open(embed_path) as f:
        for line in f:
            parts = line.strip().split(' ')
            word, embed = parts[0], parts[1:]
            w2i[word] = len(word_embeds)
            word_embeds.append(np.array(embed, dtype=np.float32))
    # PTB-style escaped brackets share the plain brackets' vectors.
    w2i['-LRB-'] = w2i['(']
    w2i['-RRB-'] = w2i[')']
    return np.array(word_embeds), w2i
| 845 | 26.290323 | 86 | py |
dynet | dynet-master/examples/treelstm/model.py | import dynet as dy
import numpy as np
import os
class TreeLSTMBuilder(object):
    """Binary TreeLSTM cells built from raw DyNet parameters.

    WS: input->gate weights used at leaf-adjacent unary nodes; US/UFS:
    child-state weights for binary nodes (one forget gate per child);
    BS: shared biases (i, o, u, f).  Embeddings live in their own
    ParameterCollection (pc_embed) so they can be saved and optimized
    separately from the structural parameters (pc_param).
    """
    def __init__(self, pc_param, pc_embed, word_vocab, wdim, hdim, word_embed=None):
        self.WS = [pc_param.add_parameters((hdim, wdim)) for _ in "iou"]
        self.US = [pc_param.add_parameters((hdim, 2 * hdim)) for _ in "iou"]
        self.UFS = [pc_param.add_parameters((hdim, 2 * hdim)) for _ in "ff"]
        self.BS = [pc_param.add_parameters(hdim) for _ in "iouf"]
        self.E = pc_embed.add_lookup_parameters((len(word_vocab), wdim), init=word_embed)
        self.w2i = word_vocab
    def expr_for_tree(self, tree, decorate=False, training=True):
        """Recursively build the (h, c) expressions for *tree*.

        decorate=True stores each internal node's hidden state on node._e
        so the classifier can attach per-node logits afterwards.  Raises
        RuntimeError on malformed (non-binary) trees.
        """
        if tree.isleaf(): raise RuntimeError('Tree structure error: meet with leaves')
        if len(tree.children) == 1:
            # Unary node directly above a leaf: embed the word (0 = UNK)
            # and run i/o/u gates without any child cell state.
            if not tree.children[0].isleaf(): raise RuntimeError(
                'Tree structure error: tree nodes with one child should be a leaf')
            emb = self.E[self.w2i.get(tree.children[0].label, 0)]
            Wi, Wo, Wu = [dy.parameter(w) for w in self.WS]
            bi, bo, bu, _ = [dy.parameter(b) for b in self.BS]
            i = dy.logistic(dy.affine_transform([bi, Wi, emb]))
            o = dy.logistic(dy.affine_transform([bo, Wo, emb]))
            u = dy.tanh(dy.affine_transform([bu, Wu, emb]))
            c = dy.cmult(i, u)
            h = dy.cmult(o, dy.tanh(c))
            if decorate: tree._e = h
            return h, c
        if len(tree.children) != 2: raise RuntimeError('Tree structure error: only binary trees are supported.')
        # Binary node: gates act on the concatenated child hidden states,
        # with a separate forget gate (f1, f2) per child cell state.
        e1, c1 = self.expr_for_tree(tree.children[0], decorate)
        e2, c2 = self.expr_for_tree(tree.children[1], decorate)
        Ui, Uo, Uu = [dy.parameter(u) for u in self.US]
        Uf1, Uf2 = [dy.parameter(u) for u in self.UFS]
        bi, bo, bu, bf = [dy.parameter(b) for b in self.BS]
        e = dy.concatenate([e1, e2])
        i = dy.logistic(dy.affine_transform([bi, Ui, e]))
        o = dy.logistic(dy.affine_transform([bo, Uo, e]))
        f1 = dy.logistic(dy.affine_transform([bf, Uf1, e]))
        f2 = dy.logistic(dy.affine_transform([bf, Uf2, e]))
        u = dy.tanh(dy.affine_transform([bu, Uu, e]))
        c = dy.cmult(i, u) + dy.cmult(f1, c1) + dy.cmult(f2, c2)
        h = dy.cmult(o, dy.tanh(c))
        if decorate: tree._e = h
        return h, c
class TreeLSTMClassifier(object):
    """Sentiment classifier: TreeLSTM encoder plus a linear layer W_.

    Structural parameters and embeddings live in separate
    ParameterCollections so they can be saved/loaded and optimized
    independently (see save/_load_param_embed).
    """
    def __init__(self, n_classes, w2i, word_embed, params, model_meta_file=None):
        self.params = params.copy()
        self.dropout_rate = self.params['dropout_rate']
        self.use_dropout = self.dropout_rate > 0
        if model_meta_file is not None:
            # NOTE(review): np.load on a pickled dict requires
            # allow_pickle=True on NumPy >= 1.16.3 -- confirm against the
            # pinned NumPy version.
            saved_params = np.load(model_meta_file).item()
            self.params.update(saved_params)
        self.pc_param = dy.ParameterCollection()
        self.pc_embed = dy.ParameterCollection()
        self.builder = TreeLSTMBuilder(self.pc_param, self.pc_embed, w2i, self.params['wembed_size'],
                                       self.params['hidden_size'], word_embed)
        self.W_ = self.pc_param.add_parameters((n_classes, self.params['hidden_size']))
        if model_meta_file is not None:
            self._load_param_embed(model_meta_file)
    def predict_for_tree(self, tree, decorate=True, training=True):
        """Return root logits (numpy); training mode also attaches
        per-node logits (node._logits) for the per-node losses."""
        h, _ = self.builder.expr_for_tree(tree, decorate, training)
        if training:
            for node in tree.nonterms_iter():
                e = dy.dropout(node._e, self.dropout_rate) if self.use_dropout else node._e
                node._logits = self.W_ * e
        else:
            tree._logits = self.W_ * h
        return tree._logits.npvalue()
    def losses_for_tree(self, tree, summation=True):
        """Cross-entropy over every internal node; returns (loss, #nodes)."""
        self.predict_for_tree(tree, decorate=True, training=True)
        nodes = tree.nonterms()
        losses = [dy.pickneglogsoftmax(nt._logits, nt.label) for nt in nodes]
        return dy.esum(losses) if summation else losses, len(nodes)
    def losses_for_tree_batch(self, trees):
        """Summed node losses over a list of trees (one graph)."""
        batch_losses = []
        for tree in trees:
            losses, _ = self.losses_for_tree(tree, summation=False)
            batch_losses += losses
        return dy.esum(batch_losses)
    def regularization_loss(self, coef):
        """L2 penalty (coef/2 * ||theta||^2) over structural params only."""
        losses = [dy.l2_norm(p) ** 2 for p in self.pc_param.parameters_list()]
        return (coef / 2) * dy.esum(losses)
    def save(self, save_dir, model_name):
        """Persist meta/params/embeddings; returns the meta file path."""
        meta_path = os.path.join(save_dir, 'meta', model_name)
        param_path = os.path.join(save_dir, 'param', model_name)
        embed_path = os.path.join(save_dir, 'embed', model_name)
        np.save(meta_path, self.params)
        self.pc_param.save(param_path)
        self.pc_embed.save(embed_path)
        return meta_path + '.npy'
    def _load_param_embed(self, model_meta_file):
        # Sibling param/embed files are derived from the meta path by
        # substituting the directory name.
        model_meta_file = model_meta_file.replace('.npy', '')
        param_path = model_meta_file.replace('meta', 'param')
        embed_path = model_meta_file.replace('meta', 'embed')
        self.pc_param.populate(param_path)
        self.pc_embed.populate(embed_path)
    @staticmethod
    def delete(model_meta_file):
        """Remove a saved checkpoint triple; no-op for None."""
        if model_meta_file is None: return
        os.remove(model_meta_file)
        model_meta_file = model_meta_file.replace('.npy', '')
        os.remove(model_meta_file.replace('meta', 'param'))
        os.remove(model_meta_file.replace('meta', 'embed'))
| 5,341 | 45.859649 | 112 | py |
dynet | dynet-master/examples/treelstm/scheduler.py | import time
import dynet as dy
import numpy as np
from utils import acc_eval
class Scheduler:
    """Training loop with early stopping and separate param/embed trainers."""
    def __init__(self, model, train, dev, params):
        self.train, self.dev = train, dev
        self.model = model
        self.params = params
        # Trainer class is looked up by name on the dy module (e.g.
        # 'AdagradTrainer'); params and embeddings get their own trainer
        # so they can use different learning rates.
        self.trainer_param = getattr(dy, params['trainer'])(model.pc_param)
        self.trainer_embed = getattr(dy, params['trainer'])(model.pc_embed)
        self.trainer_param.learning_rate = params['learning_rate_param']
        self.trainer_embed.learning_rate = params['learning_rate_embed']
        all_trainers = [self.trainer_param, self.trainer_embed]
        for trainer in all_trainers:
            trainer.set_clip_threshold(-1)
            trainer.set_sparse_updates(params['sparse'])
    def exec_train(self, max_turns=1000):
        """Train until dev accuracy stalls for 10 epochs (or max_turns).

        Returns (best dev accuracy, meta path of the best checkpoint).
        """
        time_stamp = time.time()
        total_time = []
        best_acc = 0
        n_endure, endure_upper = 0, 10
        model_meta_file = None
        for i in range(max_turns):
            self.train.reset()
            time_start = time.time()
            for j, trees in enumerate(self.train.batches(batch_size=self.params['batch_size']), 1):
                dy.renew_cg()
                loss = self.model.losses_for_tree_batch(trees)
                loss += self.model.regularization_loss(coef=self.params['regularization_strength'])
                loss_value = loss.value()
                loss.backward()
                self.trainer_param.update()
                self.trainer_embed.update()
                if j % 50 == 0:
                    self.trainer_param.status()
                    print(loss_value)
            time_epoch = time.time() - time_start
            total_time.append(time_epoch)
            print('epoch {} time {}'.format(i, time_epoch))
            # exponential learning-rate decay per epoch
            self.trainer_param.learning_rate *= self.params['learning_rate_decay']
            self.trainer_embed.learning_rate *= self.params['learning_rate_decay']
            acc = acc_eval(self.dev, self.model)
            best_acc, updated = max(acc, best_acc), acc > best_acc
            print("dev_acc=%.4f best_dev_acc=%.4f" % (acc, best_acc))
            if updated:
                # new best: replace the previous checkpoint on disk
                self.model.delete(model_meta_file)
                model_meta_file = self.model.save(self.params['save_dir'], str(time_stamp) + '_' + str(i))
                n_endure = 0
            else:
                n_endure += 1
                if n_endure > endure_upper:
                    break
        self._print_time_statistics(total_time)
        return best_acc, model_meta_file
    @staticmethod
    def _print_time_statistics(total_time):
        # first epoch excluded: it includes warm-up/compilation overhead
        print("N_EPOCH {}, MEAN {} s, STD {} s".format(len(total_time) - 1, np.mean(total_time[1:]), np.std(total_time[1:])))
| 2,726 | 39.701493 | 125 | py |
dynet | dynet-master/examples/treelstm/filter_glove.py | import codecs
import re
import os
data_dir = 'trees'
datasets = ['train', 'dev', 'test']
glove_origin_path = 'glove.840B.300d.txt'
glove_filtered_path = 'glove_filtered.txt'
def get_vocab(file_path):
    """Collect all leaf tokens (words directly before a ')') from a tree file."""
    leaf_token = re.compile(r'([^ ()]+)\)')
    words = set()
    with codecs.open(file_path) as handle:
        for raw_line in handle:
            words.update(leaf_token.findall(raw_line.strip()))
    return words
# Union the leaf vocabularies of all three splits.
vocab = set()
for dataset in datasets:
    tem_set = get_vocab(os.path.join(data_dir, dataset + '.txt'))
    vocab.update(tem_set)
total = cnt = 0
# Keep only GloVe rows whose word occurs in the corpus; the bare brackets
# are kept explicitly because get_embeds aliases -LRB-/-RRB- to them.
with codecs.open(glove_origin_path) as fin:
    with codecs.open(glove_filtered_path, 'w') as fout:
        for line in fin:
            total += 1
            word = line.split(' ', 1)[0]
            if word in vocab or word == '(' or word == ')':
                cnt += 1
                fout.write(line)
print('total: {}, after filtering: {}'.format(total, cnt))
| 954 | 24.810811 | 65 | py |
dynet | dynet-master/examples/devices/xor-multidevice.py | # Usage:
# python xor-multidevice.py --dynet-devices CPU,GPU:0,GPU:1
# or python xor-multidevice.py --dynet-gpus 2
import sys
import dynet as dy
#xsent = True
xsent = False
HIDDEN_SIZE = 8
ITERATIONS = 2000
m = dy.Model()
trainer = dy.SimpleSGDTrainer(m)
# Each layer's parameters are pinned to a specific device; activations are
# moved between devices explicitly with dy.to_device below.
pW1 = m.add_parameters((HIDDEN_SIZE, 2), device="GPU:1")
pb1 = m.add_parameters(HIDDEN_SIZE, device="GPU:1")
pW2 = m.add_parameters((HIDDEN_SIZE, HIDDEN_SIZE), device="GPU:0")
pb2 = m.add_parameters(HIDDEN_SIZE, device="GPU:0")
pV = m.add_parameters((1, HIDDEN_SIZE), device="CPU")
pa = m.add_parameters(1, device="CPU")
# Optionally warm-start from a saved text-format model passed on argv.
if len(sys.argv) == 2:
    m.populate_from_textfile(sys.argv[1])
dy.renew_cg()
W1, b1, W2, b2, V, a = dy.parameter(pW1, pb1, pW2, pb2, pV, pa)
x = dy.vecInput(2, "GPU:1")
y = dy.scalarInput(0, "CPU")
h1 = dy.tanh((W1*x) + b1)
h1_gpu0 = dy.to_device(h1, "GPU:0")
h2 = dy.tanh((W2*h1_gpu0) + b2)
h2_cpu = dy.to_device(h2, "CPU")
# xsent selects cross-entropy (targets 1/0) vs squared error (targets 1/-1).
if xsent:
    y_pred = dy.logistic((V*h2_cpu) + a)
    loss = dy.binary_log_loss(y_pred, y)
    T = 1
    F = 0
else:
    y_pred = (V*h2_cpu) + a
    loss = dy.squared_distance(y_pred, y)
    T = 1
    F = -1
# Train on all four XOR input patterns per iteration; the graph is static
# and only the input/target values are reset.
for iter in range(ITERATIONS):
    mloss = 0.0
    for mi in range(4):
        x1 = mi % 2
        x2 = (mi // 2) % 2
        x.set([T if x1 else F, T if x2 else F])
        y.set(T if x1 != x2 else F)
        mloss += loss.scalar_value()
        loss.backward()
        trainer.update()
    mloss /= 4.
    print("loss: %0.9f" % mloss)
x.set([F,T])
z = -(-y_pred)
print(z.scalar_value())
m.save("xor.pymodel")
# Rebuild the forward graph and print predictions for all four patterns.
dy.renew_cg()
W1, b1, W2, b2, V, a = dy.parameter(pW1, pb1, pW2, pb2, pV, pa)
x = dy.vecInput(2, "GPU:1")
y = dy.scalarInput(0, "CPU")
h1 = dy.tanh((W1*x) + b1)
h1_gpu0 = dy.to_device(h1, "GPU:0")
h2 = dy.tanh((W2*h1_gpu0) + b2)
h2_cpu = dy.to_device(h2, "CPU")
if xsent:
    y_pred = dy.logistic((V*h2_cpu) + a)
else:
    y_pred = (V*h2_cpu) + a
x.set([T,F])
print("TF",y_pred.scalar_value())
x.set([F,F])
print("FF",y_pred.scalar_value())
x.set([T,T])
print("TT",y_pred.scalar_value())
x.set([F,T])
print("FT",y_pred.scalar_value())
| 2,082 | 22.404494 | 66 | py |
dynet | dynet-master/examples/devices/cpu_vs_gpu.py | # Usage: python cpu_vs_gpu.py
import time
from multiprocessing import Process
def do_cpu():
    """Time a chain of 1000x1000 matrix products plus backward on the CPU."""
    import _dynet as C
    C.init()
    collection = C.Model()
    weight = collection.add_parameters((1000, 1000))
    started = time.time()
    C.renew_cg()
    W = C.parameter(weight)
    W = W*W*W*W*W*W*W
    z = C.squared_distance(W, W)
    z.value()
    z.backward()
    print("CPU time:", time.time() - started)
def do_gpu():
    """Time the same matrix-product chain on GPU:0."""
    import _dynet as G
    import sys
    # _dynet reads --dynet-devices from sys.argv when init() runs,
    # so the flag must be appended before initialization.
    sys.argv.append('--dynet-devices')
    sys.argv.append('GPU:0')
    G.init()
    collection = G.Model()
    weight = collection.add_parameters((1000, 1000))
    started = time.time()
    G.renew_cg()
    W = G.parameter(weight)
    W = W*W*W*W*W*W*W
    z = G.squared_distance(W, W)
    z.value()
    z.backward()
    print("GPU time:", time.time() - started)
if __name__ == '__main__':
    # Run the CPU and GPU benchmarks in separate processes so each gets a
    # fresh DyNet initialization (device flags are fixed at init() time).
    procs1 = Process(target=do_cpu)
    procs1.start()
    procs2 = Process(target=do_gpu)
    procs2.start()
    procs1.join()
    procs2.join()
| 866 | 18.266667 | 38 | py |
dynet | dynet-master/examples/reinforcement-learning/ddpg.py | import dynet as dy
import numpy as np
from memory import Memory
from network import MLP
# Deep Deterministic Policy Gradient: https://arxiv.org/abs/1509.02971
# An reinforcement learning agent to learn in environments which have continuous action spaces.
class DDPG:
    """Deep Deterministic Policy Gradient agent (actor-critic with target
    networks and a replay memory) for continuous action spaces."""
    def __init__(self, obs_dim, action_dim, hiddens_actor, hiddens_critic, layer_norm=False, memory_size=50000):
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        # Gaussian exploration noise, annealed linearly in learn().
        self.noise_stddev = 1.
        self.noise_stddev_decrease = 5e-4
        self.noise_stddev_lower = 5e-2
        # Actor ends in tanh (actions in [-1, 1]); critic output is linear.
        actor_activations = [dy.tanh for _ in range(len(hiddens_actor))] + [dy.tanh]
        critic_activations = [dy.tanh for _ in range(len(hiddens_critic))] + [None]
        self.actor = MLP(inpt_shape=(obs_dim,), hiddens=hiddens_actor + [action_dim], activation=actor_activations,
                         layer_norm=layer_norm)
        self.critic = MLP(inpt_shape=(obs_dim + action_dim,), hiddens=hiddens_critic + [1],
                          activation=critic_activations, layer_norm=layer_norm)
        self.actor_target = MLP(inpt_shape=(obs_dim,), hiddens=hiddens_actor + [action_dim],
                                activation=actor_activations, layer_norm=layer_norm)
        self.critic_target = MLP(inpt_shape=(obs_dim + action_dim,), hiddens=hiddens_critic + [1],
                                 activation=critic_activations, layer_norm=layer_norm)
        # Hard-copy the online weights into the target networks once.
        self.actor_target.update(self.actor, soft=False)
        self.critic_target.update(self.critic, soft=False)
        self.trainer_actor = dy.AdamTrainer(self.actor.pc)
        self.trainer_critic = dy.AdamTrainer(self.critic.pc)
        self.trainer_actor.set_learning_rate(1e-4)
        self.trainer_critic.set_learning_rate(1e-3)
        self.memory = Memory(memory_size)
    def act(self, obs):
        """Return a noisy action for *obs*, clipped to [-1, 1]."""
        dy.renew_cg()
        action = self.actor(obs).npvalue()
        if self.noise_stddev > 0:
            noise = np.random.randn(self.action_dim) * self.noise_stddev
            action += noise
        return np.clip(action, -1, 1)
    def store(self, exp):
        """Add one (obs, action, reward, obs_next, done) tuple to memory."""
        self.memory.store(exp)
    def learn(self, batch_size):
        """One gradient step for critic then actor on a replay minibatch.

        Returns the summed actor and critic loss values.
        """
        exps = self.memory.sample(batch_size)
        obss, actions, rewards, obs_nexts, dones = self._process(exps)
        # Update critic
        dy.renew_cg()
        # Bellman target: r + gamma * Q'(s', pi'(s')), zeroed at terminals.
        target_actions = self.actor_target(obs_nexts, batched=True)
        target_values = self.critic_target(dy.concatenate([dy.inputTensor(obs_nexts, batched=True), target_actions]),
                                           batched=True)
        target_values = rewards + 0.99 * target_values.npvalue() * (1 - dones)
        dy.renew_cg()
        values = self.critic(np.concatenate([obss, actions]), batched=True)
        loss = dy.mean_batches((values - dy.inputTensor(target_values, batched=True)) ** 2)
        loss_value_critic = loss.npvalue()
        loss.backward()
        self.trainer_critic.update()
        # update actor
        dy.renew_cg()
        # Deterministic policy gradient: maximize Q(s, pi(s)).
        actions = self.actor(obss, batched=True)
        obs_and_actions = dy.concatenate([dy.inputTensor(obss, batched=True), actions])
        loss = -dy.mean_batches(self.critic(obs_and_actions, batched=True))
        loss_value_actor = loss.npvalue()
        loss.backward()
        self.trainer_actor.update()
        # Anneal exploration noise down to its floor.
        self.noise_stddev = (
            self.noise_stddev - self.noise_stddev_decrease) if self.noise_stddev > self.noise_stddev_lower else self.noise_stddev_lower
        # Soft (Polyak) update of the target networks.
        self.actor_target.update(self.actor, soft=True)
        self.critic_target.update(self.critic, soft=True)
        return loss_value_actor + loss_value_critic
    # data in memory: [memory_size, exp], exp: [obs, action, reward, obs_next, done]
    # output: [obss, actions, rewards, obs_nexts, dones], 'X's: [x, batch_size]
    @staticmethod
    def _process(exps):
        # Transpose the batch of experience tuples into five column arrays.
        n = len(exps)
        ret = []
        for i in range(5):
            ret.append([])
            for j in range(n):
                ret[i].append(exps[j][i])
        ret = [np.transpose(arr) for arr in ret]
        return ret
    @property
    def epsilon(self):
        # Exposes the current exploration noise level.
        return self.noise_stddev
| 4,171 | 39.901961 | 143 | py |
dynet | dynet-master/examples/reinforcement-learning/reduce_tree.py | import operator
import numpy as np
# A simple binary tree structure to calculate some statistics from leaves.
class ReduceTree(object):
    """Complete binary tree maintaining a reduction (e.g. sum or min) over its leaves.

    Leaves live at ``values[size : 2*size]``; internal node ``i`` stores
    ``op(values[2*i], values[2*i + 1])``, so ``values[1]`` (the root) always
    holds the reduction of all leaves.
    """

    def __init__(self, size, op):
        # `size` must be a power of two so the tree is complete.
        # Fix: the original test `size & (size - 1) != 0` parsed as
        # `size & ((size - 1) != 0)` due to operator precedence, so
        # non-powers-of-two such as 6 were accepted; parenthesize the
        # bitwise check. (Also fixed the "mush" typo in the message.)
        if (size & (size - 1)) != 0:
            raise ValueError("size must be a power of 2.")
        self.size = size
        self.values = np.zeros(2 * self.size)
        self.op = op

    @property
    def root(self):
        """Reduction of all leaves under `op`."""
        return self.values[1]

    def __setitem__(self, idx, val):
        """Set leaf `idx` (int) or leaves `idx` (list) and update all ancestors."""
        if isinstance(idx, int):
            idx += self.size
            self.values[idx] = val
            self._percolate_up(idx, val)
        elif isinstance(idx, list):
            for i, idxx in enumerate(idx):
                idxx += self.size
                self.values[idxx] = val[i]
                self._percolate_up(idxx, val[i])
        else:
            raise RuntimeError("Not indexable type")

    def __getitem__(self, idx):
        """Read leaf `idx`; a list of indices returns an array of leaf values."""
        if isinstance(idx, int):
            return self.values[idx + self.size]
        elif isinstance(idx, list):
            return self.values[np.array(idx) + self.size]
        else:
            raise RuntimeError("Not indexable type")

    def _percolate_up(self, idx, val):
        # Recompute every ancestor of leaf `idx` from its two children.
        # `val` is unused but kept for interface compatibility.
        idx //= 2
        while idx > 0:
            self.values[idx] = self.op(self.values[2 * idx], self.values[2 * idx + 1])
            idx //= 2
class SumTree(ReduceTree):
    """Reduce tree specialised to addition; supports proportional sampling."""

    def __init__(self, size):
        super().__init__(size, operator.add)

    def sample(self, value):
        """Walk root-to-leaf to the leaf whose cumulative-sum interval contains `value`.

        Returns a 0-based leaf index; leaf i is reached with probability
        proportional to its stored value when `value` is uniform on
        [0, root].
        """
        node = 1
        while node < self.size:
            left = 2 * node
            if value > self.values[left]:
                # Descend right, discounting the left subtree's mass.
                value -= self.values[left]
                node = left + 1
            else:
                node = left
        return node - self.size
| 1,743 | 27.590164 | 86 | py |
dynet | dynet-master/examples/reinforcement-learning/memory.py | import numpy as np
from math import log, ceil
from reduce_tree import ReduceTree, SumTree
# A simple memory to store and sample experiences.
class Memory(object):
    """Fixed-capacity ring buffer of experiences with uniform random sampling."""

    def __init__(self, size):
        self.size = size
        self.idx = 0  # total number of experiences ever stored
        self.memory = np.zeros(size, dtype=object)

    def store(self, exp):
        """Insert an experience, overwriting the oldest slot once full."""
        slot = self.idx % self.size
        self.memory[slot] = exp
        self.idx += 1

    def sample(self, batch_size):
        """Draw `batch_size` stored experiences uniformly, with replacement."""
        n_filled = min(self.idx, self.size)
        chosen = np.random.choice(n_filled, batch_size)
        return self.memory[chosen]
# Prioritized Replay: https://arxiv.org/abs/1511.05952
class PrioritizedMemory(object):
    """Replay memory sampling experiences with probability proportional to priority^alpha.

    Backed by a SumTree (proportional sampling) and a min ReduceTree
    (normalization for importance-sampling weights).
    """

    def __init__(self, size, alpha=0.6):
        # Round capacity up to a power of two, as required by the trees.
        self.size = int(2 ** ceil(log(size, 2)))
        self.memory = np.zeros(self.size, dtype=object)
        self.sum_tree = SumTree(self.size)
        self.min_tree = ReduceTree(self.size, min)
        self.idx = 0  # total number of stores so far
        # New experiences receive the current max priority so each is
        # sampled at least once; the max is clipped to avoid blow-up.
        self.max_value = 1.
        self.max_value_upper = 1000.
        self.alpha = alpha  # priority exponent (0 would mean uniform sampling)

    def store(self, exp):
        # Ring-buffer overwrite; new entry gets maximal priority in both trees.
        idx = self.idx % self.size
        self.memory[idx] = exp
        self.sum_tree[idx] = self.max_value
        self.min_tree[idx] = self.max_value
        self.idx += 1

    def sample(self, batch_size, beta):
        """Sample `batch_size` experiences proportionally to priority.

        Returns (leaf indices, experiences, importance weights); `beta`
        anneals the importance-sampling correction (1.0 = full correction).
        """
        indices = []
        max_value = self.sum_tree.root
        for _ in range(batch_size):
            value = np.random.uniform(0, max_value)
            idx = self.sum_tree.sample(value)
            indices.append(idx)
        min_value = self.min_tree.root
        # Weight w_i = (p_i / min_p)^-beta, i.e. normalized so the
        # smallest-priority entry has weight 1 (1e-4 guards divide-by-zero).
        return indices, self.memory[indices], (self.sum_tree[indices] / (min_value + 1e-4)) ** (-beta)

    def update(self, indices, values):
        """Assign new priorities (e.g. |TD-error|) to the given leaf indices."""
        values = np.array(values)
        values_modified = values ** self.alpha
        self.sum_tree[indices] = values_modified
        self.min_tree[indices] = values_modified
        # Track (clipped) running max of raw priorities for future stores.
        self.max_value = max(self.max_value, np.max(values))
        self.max_value = min(self.max_value, self.max_value_upper)

    def is_full(self):
        # True once every slot has been written at least once.
        return self.idx >= self.size
| 1,997 | 31.225806 | 102 | py |
dynet | dynet-master/examples/reinforcement-learning/network.py | import dynet as dy
class Network(object):
    """Base class owning a dynet ParameterCollection with target-network syncing."""

    def __init__(self, pc):
        if pc is None:
            self.pc = dy.ParameterCollection()
        else:
            self.pc = pc

    def update(self, other, soft=False, tau=0.1):
        """Copy parameters from `other` into this network.

        With soft=True performs a Polyak update
        (new = (1 - tau) * own + tau * other); otherwise a hard copy.
        """
        own_params = self.pc.parameters_list()
        other_params = other.pc.parameters_list()
        for mine, theirs in zip(own_params, other_params):
            if soft:
                blended = (1 - tau) * mine.as_array() + tau * theirs.as_array()
            else:
                blended = theirs.as_array()
            mine.set_value(blended)
class MLP(Network):
    """Multi-layer perceptron over a 1-D input, with optional layer normalization."""

    def __init__(self, inpt_shape, hiddens, activation=dy.rectify, layer_norm=False, pc=None):
        super().__init__(pc)
        if len(inpt_shape) != 1:
            raise ValueError("inpt_shape must be 1 dimension for MLP.")
        # `activation` is either one callable applied after every layer, or a
        # per-layer sequence where a None entry means "no activation".
        self.specified_activation = hasattr(activation, "__len__")
        self.activation = activation
        self.layer_norm = layer_norm
        units = [inpt_shape[0]] + hiddens
        self.Ws, self.bs = [], []
        if layer_norm:
            self.ln_gs, self.ln_bs = [], []
        for i in range(len(units) - 1):
            self.Ws.append(self.pc.add_parameters((units[i + 1], units[i])))
            self.bs.append(self.pc.add_parameters(units[i + 1]))
            if layer_norm:
                # Per-layer gain and bias passed to dy.layer_norm.
                self.ln_gs.append(self.pc.add_parameters(units[i + 1]))
                self.ln_bs.append(self.pc.add_parameters(units[i + 1]))
        self.n_layers = len(self.Ws)

    def __call__(self, obs, batched=False):
        """Forward pass; `obs` may be a dynet Expression or a raw array/list."""
        out = obs if isinstance(obs, dy.Expression) else dy.inputTensor(obs, batched=batched)
        for i in range(self.n_layers):
            b, W = dy.parameter(self.bs[i]), dy.parameter(self.Ws[i])
            out = dy.affine_transform([b, W, out])
            # Layer norm is applied to hidden layers only, never the output.
            if self.layer_norm and i != self.n_layers - 1:
                out = dy.layer_norm(out, self.ln_gs[i], self.ln_bs[i])
            if self.specified_activation:
                if self.activation[i] is not None:
                    out = self.activation[i](out)
            else:
                out = self.activation(out)
        return out
class Header(Network):
    """Linear output head on top of a base network, producing per-action scores.

    With dueling=True an extra linear layer also produces a scalar state
    value V (Dueling DQN architecture).
    """

    def __init__(self, opt_size, network, dueling=False, **kwargs):
        super().__init__(None)
        # Build the underlying feature network inside this head's collection
        # so one trainer updates everything.
        self.network = network(**kwargs, pc=self.pc)
        self.opt_size = opt_size
        self.dueling = dueling
        hiddens = kwargs['hiddens']
        self.W = self.pc.add_parameters((opt_size, hiddens[-1]))
        self.b = self.pc.add_parameters(opt_size)
        if dueling:
            # Extra linear layer for the scalar state value.
            self.W_extra = self.pc.add_parameters((1, hiddens[-1]))
            self.b_extra = self.pc.add_parameters(1)

    def __call__(self, obs, batched=False):
        """Return action scores As, or (As, V) when dueling is enabled."""
        out = self.network(obs, batched)
        W, b = dy.parameter(self.W), dy.parameter(self.b)
        As = dy.affine_transform([b, W, out])
        if self.dueling:
            W_extra, b_extra = dy.parameter(self.W_extra), dy.parameter(self.b_extra)
            V = dy.affine_transform([b_extra, W_extra, out])
            return As, V
        return As
| 2,989 | 38.866667 | 101 | py |
dynet | dynet-master/examples/reinforcement-learning/dqn.py | import dynet as dy
import numpy as np
from memory import Memory, PrioritizedMemory
# DeepQNetwork: https://arxiv.org/abs/1312.5602
# An reinforcement learning agent to learn in environments which have discrete action spaces.
# Double Q-Learning: https://arxiv.org/abs/1509.06461
# Prioritized Replay: https://arxiv.org/abs/1511.05952
# Dueling Network Architectures: https://arxiv.org/abs/1511.06581
class DeepQNetwork(object):
    """DQN agent with optional Double-DQN, Dueling heads and Prioritized Replay."""

    def __init__(self, network, memory_size, use_double_dqn=False, target_network=None, n_replace_target=500,
                 dueling=True, prioritized=True):
        self.network = network
        self.trainer = dy.AdamTrainer(network.pc)
        self.trainer.set_clip_threshold(1.)
        self.trainer.set_learning_rate(5e-4)
        # Epsilon-greedy exploration, annealed linearly to a floor.
        self.epsilon = 1.
        self.epsilon_decrease = 1e-4
        self.epsilon_lower = 0.05
        self.reward_decay = 0.99  # discount factor gamma
        self.learn_step = 0
        self.use_double_dqn = use_double_dqn
        if use_double_dqn:
            # Separate target network, hard-synced every n_replace_target steps.
            self.target_network = target_network
            self.n_replace_target = n_replace_target
            self.target_network.update(network)
        self.dueling = dueling
        self.prioritized = prioritized
        if prioritized:
            # Importance-sampling exponent, annealed from 0 toward 1.
            self.beta = 0.
            self.beta_increase = self.epsilon_decrease
        self.memory = PrioritizedMemory(memory_size) if prioritized else Memory(memory_size)

    def act(self, obs, deterministic=True):
        """Epsilon-greedy action selection for a single observation."""
        if np.random.random() < self.epsilon:
            return np.random.choice(self.network.opt_size)
        dy.renew_cg()
        if self.dueling:
            actions, _ = self.network(obs)
            actions = actions.npvalue()
        else:
            actions = self.network(obs).npvalue()
        if deterministic:
            return np.argmax(actions)
        else:
            # NOTE(review): samples with p = raw network outputs, which are
            # not guaranteed to form a probability distribution - confirm.
            return np.random.choice(self.network.opt_size, p=actions)

    def store(self, exp):
        # exp is an (obs, action, reward, obs_next, done) tuple.
        self.memory.store(exp)

    def learn(self, batch_size):
        """One TD-learning step on a replayed minibatch; returns the loss value."""
        if self.prioritized:
            # PER waits until the buffer is full before learning.
            if not self.memory.is_full(): return -np.inf
            indices, exps, weights = self.memory.sample(batch_size, self.beta)
        else:
            exps = self.memory.sample(batch_size)
        obss, actions, rewards, obs_nexts, dones = self._process(exps)
        # Compute bootstrap targets r + gamma * max_a Q_target(s', a).
        dy.renew_cg()
        target_network = self.target_network if self.use_double_dqn else self.network
        if self.dueling:
            # Dueling head: Q = A + V.
            target_values, v = target_network(obs_nexts, batched=True)
            target_values = target_values.npvalue() + v.npvalue()
        else:
            target_values = target_network(obs_nexts, batched=True)
            target_values = target_values.npvalue()
        target_values = np.max(target_values, axis=0)
        # (1 - dones) zeroes the bootstrap term on terminal transitions.
        target_values = rewards + self.reward_decay * (target_values * (1 - dones))
        # Fresh graph for the differentiable prediction side.
        dy.renew_cg()
        if self.dueling:
            all_values_expr, v = self.network(obss, batched=True)
        else:
            all_values_expr = self.network(obss, batched=True)
        picked_values = dy.pick_batch(all_values_expr, actions)
        diff = (picked_values + v if self.dueling else picked_values) - dy.inputTensor(target_values, batched=True)
        if self.prioritized:
            # New priorities are the absolute TD errors.
            self.memory.update(indices, np.transpose(np.abs(diff.npvalue())))
        losses = dy.pow(diff, dy.constant(1, 2))
        if self.prioritized:
            # Importance-sampling correction for the biased sampling.
            losses = dy.cmult(losses, dy.inputTensor(weights, batched=True))
        loss = dy.sum_batches(losses)
        loss_value = loss.npvalue()
        loss.backward()
        self.trainer.update()
        # Anneal exploration and (for PER) the IS exponent.
        self.epsilon = max(self.epsilon - self.epsilon_decrease, self.epsilon_lower)
        if self.prioritized:
            self.beta = min(self.beta + self.beta_increase, 1.)
        self.learn_step += 1
        if self.use_double_dqn and self.learn_step % self.n_replace_target == 0:
            self.target_network.update(self.network)
        return loss_value

    # data in memory: [N, exp], exp: [obs, action, reward, obs_next, done]
    # output: [obss, actions, rewards, obs_nexts, dones], 'X's: [x, batch_size]
    @staticmethod
    def _process(exps):
        # Transpose a batch of experience tuples into per-field arrays laid
        # out as [feature_dim, batch_size] for dynet's batched inputs.
        n = len(exps)
        ret = []
        for i in range(5):
            ret.append([])
            for j in range(n):
                ret[i].append(exps[j][i])
        ret = [np.transpose(arr) for arr in ret]
        return ret
| 4,403 | 37.631579 | 115 | py |
dynet | dynet-master/examples/reinforcement-learning/train_test_utils.py | import numpy as np
def train_pipeline_progressive(env, player, score_threshold, batch_size, n_episode, learn_start=100, print_every=10):
    """Train `player` on `env` with one learn() call per environment step.

    Learning starts after `learn_start` episodes; training stops early once
    the mean reward of the last 100 episodes exceeds `score_threshold`.
    """
    rewards, losses = [], []
    for i_episode in range(n_episode):
        obs = env.reset()
        reward = 0
        for t in range(env._max_episode_steps):
            action = player.act(obs)
            obs_next, rwd, done, _ = env.step(action)
            # Episodes cut off by the time limit are stored as non-terminal
            # so the bootstrap value is not wrongly zeroed.
            player.store((obs, action, rwd, obs_next, float(False if t == env._max_episode_steps - 1 else done)))
            reward += rwd
            if i_episode > learn_start:
                loss = player.learn(batch_size)
                if loss is not None: losses.append(loss)
            if done: break
            obs = obs_next
        rewards.append(reward)
        if i_episode % print_every == 0:
            # Report running averages over the last 100 episodes.
            score = np.mean(rewards[-100:])
            print("================================")
            print("i_episode: {}".format(i_episode))
            print("100 games mean reward: {}".format(score))
            if len(losses) > 0:
                print("100 games mean loss: {}".format(np.mean(losses[-100:])))
            print("================================")
            print()
            if score > score_threshold: break
def train_pipeline_conservative(env, player, score_threshold, batch_size, n_epoch, n_rollout, n_train, learn_start=0,
                                early_stop=True):
    """Epoch-style training: collect `n_rollout` episodes, then run `n_train` learn steps.

    Learning starts after `learn_start` epochs; with early_stop=True,
    training ends once an epoch's mean rollout reward reaches
    `score_threshold`.
    """
    for i_epoch in range(n_epoch):
        rewards, losses = [], []
        # --- data-collection phase ---
        for _ in range(n_rollout):
            reward = 0
            obs = env.reset()
            for t in range(env._max_episode_steps):
                action = player.act(obs)
                obs_next, rwd, done, _ = env.step(action)
                # Time-limit terminations are stored as non-terminal so the
                # bootstrap value is not wrongly zeroed.
                player.store((obs, action, rwd, obs_next, float(False if t == env._max_episode_steps - 1 else done)))
                reward += rwd
                if done: break
                obs = obs_next
            rewards.append(reward)
        # --- learning phase ---
        if i_epoch > learn_start:
            for _ in range(n_train):
                loss = player.learn(batch_size)
                if loss is not None: losses.append(loss)
        if i_epoch % 1 == 0:
            mean_reward = np.mean(rewards)
            print("===========================")
            print("i_epoch: {}".format(i_epoch))
            print("epsilon: {}".format(player.epsilon))
            print("Average score of {} rollout games: {}".format(n_rollout, mean_reward))
            if i_epoch > learn_start:
                print("Average training loss: {}".format(np.mean(losses)))
            print("===========================")
            print()
        if early_stop and mean_reward >= score_threshold: break
def test(env, player, n_turns, render=False):
    """Run `n_turns` evaluation episodes, printing the score of each finished episode.

    The player keeps its current exploration setting; set render=True to
    visualize the rollouts. Blocks on stdin before starting.
    """
    # Fix: corrected typos in the user-facing prompt ("test.," / "coninue").
    input("Ready to test. Press any key to continue...")
    for _ in range(n_turns):
        score = 0
        obs = env.reset()
        for _ in range(env._max_episode_steps):
            if render: env.render()
            action = player.act(obs)
            obs, reward, done, _ = env.step(action)
            if render: env.render()
            score += reward
            if done:
                print('The score is {}'.format(score))
                break
        if render: env.render()
| 3,261 | 39.271605 | 117 | py |
dynet | dynet-master/examples/reinforcement-learning/main_ddpg.py | import argparse
import gym
from ddpg import DDPG
from train_test_utils import train_pipeline_conservative, test
def establish_args():
    """Parse the command-line options for a DDPG training run."""
    parser = argparse.ArgumentParser()
    # (flag, default, type) triples keep the option table compact.
    option_table = [
        ("--env_name", "Walker2d-v2", str),
        ("--memory_size", 1e6, float),
        ("--batch_size", 64, int),
        ("--n_epoch", 500, int),
        ("--rollout_per_epoch", 100, int),
        ("--train_per_epoch", 100, int),
    ]
    for flag, default, kind in option_table:
        parser.add_argument(flag, default=default, type=kind)
    return parser.parse_args()
# Script entry: build environment and agent, train, then demo 10 episodes.
args = establish_args()
env = gym.make(args.env_name)
# Actor and critic are both (64, 64) MLPs over the env's spaces.
player = DDPG(env.observation_space.shape[0], env.action_space.shape[0], hiddens_actor=[64, 64],
              hiddens_critic=[64, 64], memory_size=int(args.memory_size))
# score_threshold=999 is far above typical scores, so early stopping is
# effectively disabled and training runs the full n_epoch budget.
train_pipeline_conservative(env, player, score_threshold=999, batch_size=args.batch_size, n_epoch=args.n_epoch,
                            n_rollout=args.rollout_per_epoch, n_train=args.train_per_epoch)
test(env, player, n_turns=10, render=True)
| 1,094 | 39.555556 | 111 | py |
dynet | dynet-master/examples/reinforcement-learning/main_dqn.py | import argparse
import gym
from dqn import DeepQNetwork
from network import MLP, Header
from train_test_utils import train_pipeline_progressive, train_pipeline_conservative, test
def establish_args():
    """Parse the command-line options for the DQN experiments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dynet-gpus', default=0, type=int)
    parser.add_argument('--env_id', default=0, type=int)
    # The DQN extensions are opt-in boolean switches.
    for extension in ('--double', '--dueling', '--prioritized'):
        parser.add_argument(extension, default=False, action='store_true')
    return parser.parse_args()
# ==== args ====
args = establish_args()
# ==== environment =====
ENVs = ['CartPole-v1', 'Acrobot-v1', 'MountainCar-v0']
# Per-environment reward level treated as "solved" for early stopping.
score_thresholds = [499, -100, -100]
env_id = args.env_id
ENV, score_threshold = ENVs[env_id], score_thresholds[env_id]
env = gym.make(ENV)
# ==== DQN ====
MEMORY_SIZE = 50000
HIDDENS = [128]
network = Header(inpt_shape=env.observation_space.shape, hiddens=HIDDENS, opt_size=env.action_space.n,
                 network=MLP, dueling=args.dueling)
# A second head is only built when double DQN needs a target network.
target_network = Header(inpt_shape=env.observation_space.shape, hiddens=HIDDENS,
                        opt_size=env.action_space.n, network=MLP, dueling=args.dueling) if args.double else None
dqn = DeepQNetwork(network=network, memory_size=MEMORY_SIZE, use_double_dqn=args.double, target_network=target_network,
                   dueling=args.dueling, prioritized=args.prioritized)
# ==== train & test ====
# choose one of the two pipelines: epoch-style for CartPole, per-step
# learning for Acrobot / MountainCar.
if env_id == 0:
    train_pipeline_conservative(env, dqn, score_threshold, n_epoch=500, n_rollout=100, n_train=1000, batch_size=256)
if env_id == 1 or env_id == 2:
    train_pipeline_progressive(env, dqn, score_threshold, batch_size=32, n_episode=2000)
test(env, dqn, n_turns=10, render=True)
| 1,846 | 33.849057 | 119 | py |
dynet | dynet-master/examples/tagger/bilstmtagger.py | import dynet as dy
from collections import Counter
import random
import util
# format of files: each line is "word<TAB>tag<newline>", blank line is new sentence.
# Hard-coded corpus locations (format: "word<TAB>tag" lines, blank = new sentence).
train_file="/Users/yogo/Vork/Research/corpora/pos/WSJ.TRAIN"
test_file="/Users/yogo/Vork/Research/corpora/pos/WSJ.TEST"
# When True, a hidden MLP layer is placed between the BiLSTM and the softmax.
MLP=True
def read(fname):
    """Yield sentences from a CoNLL-style tagged file.

    Each non-blank line is "word<whitespace>tag"; a blank line ends a
    sentence. Yields each sentence as a list of (word, tag) tuples.
    """
    sent = []
    # Fix: use a context manager so the file handle is always closed.
    with open(fname) as fh:
        for line in fh:
            parts = line.strip().split()
            if not parts:
                if sent:
                    yield sent
                sent = []
            else:
                w, p = parts
                sent.append((w, p))
    # Fix: also yield the final sentence when the file does not end with
    # a blank line (the original silently dropped it).
    if sent:
        yield sent
# Load the corpora; each sentence is a list of (word, tag) pairs.
train=list(read(train_file))
test=list(read(test_file))
words=[]
tags=[]
wc=Counter()
for s in train:
    for w,p in s:
        words.append(w)
        tags.append(p)
        wc[w]+=1
# Reserve an explicit unknown-word token.
words.append("_UNK_")
#words=[w if wc[w] > 1 else "_UNK_" for w in words]
tags.append("_START_")
# Include test words so the vocabulary covers them too.
for s in test:
    for w,p in s:
        words.append(w)
vw = util.Vocab.from_corpus([words])
vt = util.Vocab.from_corpus([tags])
UNK = vw.w2i["_UNK_"]
nwords = vw.size()
ntags = vt.size()
model = dy.Model()
trainer = dy.SimpleSGDTrainer(model)
E = model.add_lookup_parameters((nwords, 128))   # word embeddings (dim 128)
p_t1 = model.add_lookup_parameters((ntags, 30))  # tag embeddings (appears unused below)
if MLP:
    # Hidden MLP layer over the concatenated 2*50-dim BiLSTM output.
    pH = model.add_parameters((32, 50*2))
    pO = model.add_parameters((ntags, 32))
else:
    pO = model.add_parameters((ntags, 50*2))
# Forward and backward LSTMs (1 layer, 128 -> 50) of the BiLSTM tagger.
builders=[
    dy.LSTMBuilder(1, 128, 50, model),
    dy.LSTMBuilder(1, 128, 50, model),
]
def build_tagging_graph(words, tags, builders):
    """Build the BiLSTM tagging loss for one sentence.

    `words` and `tags` are index lists; returns the summed per-token
    cross-entropy loss as a dynet expression.
    """
    dy.renew_cg()
    f_init, b_init = [b.initial_state() for b in builders]
    wembs = [E[w] for w in words]
    # Gaussian noise on the embeddings acts as a regularizer during training.
    wembs = [dy.noise(we,0.1) for we in wembs]
    fw = [x.output() for x in f_init.add_inputs(wembs)]
    bw = [x.output() for x in b_init.add_inputs(reversed(wembs))]
    if MLP:
        H = dy.parameter(pH)
        O = dy.parameter(pO)
    else:
        O = dy.parameter(pO)
    errs = []
    # bw was computed over the reversed sentence, so reverse it again to
    # align each backward state with its forward state and gold tag.
    for f,b,t in zip(fw, reversed(bw), tags):
        f_b = dy.concatenate([f,b])
        if MLP:
            r_t = O*(dy.tanh(H * f_b))
        else:
            r_t = O * f_b
        err = dy.pickneglogsoftmax(r_t, t)
        errs.append(err)
    return dy.esum(errs)
def tag_sent(sent, builders):
    """Tag one sentence (list of (word, gold_tag) pairs) with the BiLSTM.

    Gold tags are ignored; returns the list of predicted tag strings.
    """
    dy.renew_cg()
    f_init, b_init = [b.initial_state() for b in builders]
    wembs = [E[vw.w2i.get(w, UNK)] for w, t in sent]
    fw = [x.output() for x in f_init.add_inputs(wembs)]
    bw = [x.output() for x in b_init.add_inputs(reversed(wembs))]
    if MLP:
        H = dy.parameter(pH)
        O = dy.parameter(pO)
    else:
        O = dy.parameter(pO)
    tags = []
    for f, b, (w, t) in zip(fw, reversed(bw), sent):
        if MLP:
            r_t = O * (dy.tanh(H * dy.concatenate([f, b])))
        else:
            r_t = O * dy.concatenate([f, b])
        out = dy.softmax(r_t)
        # Fix: the original called np.argmax, but numpy is never imported in
        # this file (NameError at runtime); use the ndarray's own argmax.
        chosen = out.npvalue().argmax()
        tags.append(vt.i2w[chosen])
    return tags
# Training loop: 50 epochs of per-sentence SGD, with periodic loss
# reporting (every 5000 sentences) and test-set accuracy (every 10000).
tagged = loss = 0
for ITER in range(50):
    random.shuffle(train)
    for i,s in enumerate(train,1):
        if i % 5000 == 0:
            trainer.status()
            # Average per-token loss since the last report.
            print(loss / tagged)
            loss = 0
            tagged = 0
        if i % 10000 == 0:
            # Evaluate tagging accuracy on the held-out test set.
            good = bad = 0.0
            for sent in test:
                tags = tag_sent(sent, builders)
                golds = [t for w,t in sent]
                for go,gu in zip(golds,tags):
                    if go == gu: good +=1
                    else: bad+=1
            print(good/(good+bad))
        ws = [vw.w2i.get(w, UNK) for w,p in s]
        ps = [vt.w2i[p] for w,p in s]
        sum_errs = build_tagging_graph(ws,ps,builders)
        # NOTE(review): `squared` is an unused leftover from an experiment
        # with a squared loss (see the commented-out factor).
        squared = -sum_errs# * sum_errs
        loss += sum_errs.scalar_value()
        tagged += len(ps)
        sum_errs.backward()
        trainer.update()
| 3,719 | 24.655172 | 84 | py |
dynet | dynet-master/examples/transformer/wrap-data.py | import sys
import collections
import itertools
def threshold_vocab(fname, threshold):
    """Return the set of tokens appearing at least `threshold` times in `fname`."""
    counts = collections.Counter()
    with open(fname) as fin:
        for line in fin:
            counts.update(line.split())
    return {word for word, count in counts.items() if count >= threshold}
def load_vocab_from_file(fname):
    """Read a vocabulary file (one token per line) into a set.

    The file is opened in binary mode, so the returned set contains bytes
    objects (preserving the original behavior; this script targets Python 2
    where str and bytes coincide).
    """
    vocab = set()
    # Fix: the original never closed the file handle; `with` guarantees it.
    with open(fname, 'rb') as fv:
        for line in fv:
            vocab.add(line.strip())
    return vocab
# Command line: <src-ext> <tgt-ext> <train-prefix> <dev-prefix> <test-prefix>
# followed by either <vocab-prefix> (argc == 7) or two frequency
# thresholds <src-thr> <tgt-thr> (argc == 8).
sfname = sys.argv[3] + "." + sys.argv[1]  # training source file, e.g. train.en
tfname = sys.argv[3] + "." + sys.argv[2]  # training target file, e.g. train.vi
argc = len(sys.argv)
if argc == 7:
    # Pre-built vocabulary files, one per language.
    # NOTE(review): load_vocab_from_file reads in binary mode and yields
    # bytes; under Python 3 the str-token membership tests in process_*
    # would never match (this script targets Python 2, where str == bytes).
    source_vocab = load_vocab_from_file(sys.argv[6] + "." + sys.argv[1])
    target_vocab = load_vocab_from_file(sys.argv[6] + "." + sys.argv[2])
elif argc == 8:
    # Build the vocabularies from the training data by frequency threshold.
    source_vocab = threshold_vocab(sfname, int(sys.argv[6]))
    target_vocab = threshold_vocab(tfname, int(sys.argv[7]))
else: exit()
def process_corpus(sf, tf, of, sv=source_vocab, tv=target_vocab):
    """Write a parallel corpus as "<s> src </s> ||| <s> tgt </s>" lines to `of`.

    Tokens outside the source/target vocabularies become <unk>.
    NOTE: Python 2 only (print >> syntax and itertools.izip).
    """
    with open(of, 'w') as fout:
        with open(sf) as sin:
            with open(tf) as tin:
                for sline, tline in itertools.izip(sin, tin):
                    print >>fout, '<s>',
                    for token in sline.split():
                        if token in sv:
                            print >>fout, token,
                        else:
                            print >>fout, '<unk>',
                    print >>fout, '</s>', '|||',
                    print >>fout, '<s>',
                    for token in tline.split():
                        if token in tv:
                            print >>fout, token,
                        else:
                            print >>fout, '<unk>',
                    print >>fout, '</s>'
def process_corpus_r(sf, tf, of, sv=source_vocab, tv=target_vocab):
    """Like process_corpus but reversed: writes "<s> tgt </s> ||| <s> src </s>".

    Tokens outside the respective vocabularies become <unk>.
    NOTE: Python 2 only (print >> syntax and itertools.izip).
    """
    with open(of, 'w') as fout:
        with open(sf) as sin:
            with open(tf) as tin:
                for sline, tline in itertools.izip(sin, tin):
                    # Target side first.
                    print >>fout, '<s>',
                    for token in tline.split():
                        if token in tv:
                            print >>fout, token,
                        else:
                            print >>fout, '<unk>',
                    print >>fout, '</s>', '|||',
                    print >>fout, '<s>',
                    for token in sline.split():
                        if token in sv:
                            print >>fout, token,
                        else:
                            print >>fout, '<unk>',
                    print >>fout, '</s>'
def process_test(sf, of, vocab):
    """Write a monolingual file: each line becomes "<s> tokens </s>" with OOVs as <unk>.

    NOTE: Python 2 only (print >> syntax).
    """
    with open(of, 'w') as fout:
        with open(sf) as sin:
            for sline in sin:
                print >>fout, '<s>',
                for token in sline.split():
                    if token in vocab:
                        print >>fout, token,
                    else:
                        print >>fout, '<unk>',
                print >>fout, '</s>'
# Output name for the capped parallel training corpus, e.g. train.en-vi.capped.
ofname = sys.argv[3] + "." + sys.argv[1] + "-" + sys.argv[2] + ".capped"
process_corpus(sfname, tfname, ofname)  # train (for training)
# Dev set in parallel "src ||| tgt" form (for training).
process_corpus(sys.argv[4] + "." + sys.argv[1], sys.argv[4] + "." + sys.argv[2], sys.argv[4] + "." + sys.argv[1] + "-" + sys.argv[2] + ".capped")
# Dev and test source sides alone (for decoding).
process_test(sys.argv[4] + "." + sys.argv[1], sys.argv[4] + "." + sys.argv[1] + ".capped", source_vocab)
process_test(sys.argv[5] + "." + sys.argv[1], sys.argv[5] + "." + sys.argv[1] + ".capped", source_vocab)
| 4,419 | 43.646465 | 370 | py |
dynet | dynet-master/examples/batching/rnnlm-batch.py | import dynet as dy
import time
import random
LAYERS = 2
INPUT_DIM = 256 #50 #256
HIDDEN_DIM = 256 # 50 #1024
VOCAB_SIZE = 0
MB_SIZE = 50 # mini batch size
import argparse
from collections import defaultdict
from itertools import count
import sys
import util
class RNNLanguageModel:
    """RNN language model with minibatched training and ancestral sampling.

    Uses the module-global `vocab` (built in __main__) for token/index maps.
    """

    def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
        # Char-level LSTM (layers=2, input=256, hidden=128, model)
        self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
        # Lookup parameters for word embeddings
        self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
        # Softmax weights/biases on top of LSTM outputs
        self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
        self.bias = model.add_parameters((VOCAB_SIZE))

    # Build the language model graph
    def BuildLMGraph(self, sents):
        """Build the batched LM loss for `sents` (assumed sorted by decreasing length).

        Shorter sentences are padded with "<s>" and their positions masked
        out of the loss. Returns (total loss over the batch, token count).
        """
        dy.renew_cg()
        # initialize the RNN
        init_state = self.builder.initial_state()
        # parameters -> expressions
        R = dy.parameter(self.R)
        bias = dy.parameter(self.bias)
        S = vocab.w2i["<s>"]
        # get the cids and masks for each step
        tot_chars = 0
        cids = []
        masks = []
        for i in range(len(sents[0])):
            # Token ids at position i across the batch; pad with "<s>".
            cids.append([(vocab.w2i[sent[i]] if len(sent) > i else S) for sent in sents])
            mask = [(1 if len(sent)>i else 0) for sent in sents]
            masks.append(mask)
            tot_chars += sum(mask)
        # start the rnn with "<s>"
        init_ids = cids[0]
        s = init_state.add_input(dy.lookup_batch(self.lookup, init_ids))
        losses = []
        # feed char vectors into the RNN and predict the next char
        for cid, mask in zip(cids[1:], masks[1:]):
            score = dy.affine_transform([bias, R, s.output()])
            loss = dy.pickneglogsoftmax_batch(score, cid)
            # mask the loss if at least one sentence is shorter
            # (batch is length-sorted, so checking the last entry suffices)
            if mask[-1] != 1:
                mask_expr = dy.inputVector(mask)
                mask_expr = dy.reshape(mask_expr, (1,), len(sents))
                loss = loss * mask_expr
            losses.append(loss)
            # update the state of the RNN
            cemb = dy.lookup_batch(self.lookup, cid)
            s = s.add_input(cemb)
        return dy.sum_batches(dy.esum(losses)), tot_chars

    def sample(self, first=1, nchars=0, stop=-1):
        """Sample a token sequence starting from token `first`.

        Stops at token `stop`, or after more than `nchars` tokens when
        nchars > 0. Returns the list of sampled token ids.
        """
        res = [first]
        dy.renew_cg()
        state = self.builder.initial_state()
        R = dy.parameter(self.R)
        bias = dy.parameter(self.bias)
        cw = first
        while True:
            x_t = dy.lookup(self.lookup, cw)
            state = state.add_input(x_t)
            y_t = state.output()
            r_t = bias + (R * y_t)
            ydist = dy.softmax(r_t)
            dist = ydist.vec_value()
            # Inverse-CDF sampling over the softmax distribution.
            rnd = random.random()
            for i,p in enumerate(dist):
                rnd -= p
                if rnd <= 0: break
            res.append(i)
            cw = i
            if cw == stop: break
            if nchars and len(res) > nchars: break
        return res
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('corpus', help='Path to the corpus file.')
    args = parser.parse_args()
    train = util.CharsCorpusReader(args.corpus, begin="<s>")
    vocab = util.Vocab.from_corpus(train)
    VOCAB_SIZE = vocab.size()
    model = dy.Model()
    trainer = dy.SimpleSGDTrainer(model)
    #lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=SimpleRNNBuilder)
    lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.LSTMBuilder)
    train = list(train)
    # Sort training sentences in descending order and count minibatches
    # (same-length sentences batch together with minimal padding).
    train.sort(key=lambda x: -len(x))
    train_order = [x*MB_SIZE for x in range(int((len(train)-1)/MB_SIZE + 1))]
    # Perform training
    i = 0
    chars = loss = 0.0
    for ITER in range(100):
        random.shuffle(train_order)
        #_start = time.time()
        for sid in train_order:
            i += 1
            # Report and sample every minibatch (the throttling condition
            # below was commented out).
            #if i % int(50) == 0:
            trainer.status()
            if chars > 0: print(loss / chars,)
            for _ in range(1):
                samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
                print("".join([vocab.i2w[c] for c in samp]).strip())
            loss = 0.0
            chars = 0.0
            # train on the minibatch
            errs, mb_chars = lm.BuildLMGraph(train[sid: sid + MB_SIZE])
            loss += errs.scalar_value()
            chars += mb_chars
            errs.backward()
            trainer.update()
            #print "TM:",(time.time() - _start)/chars
        print("ITER",ITER,loss)
        #print(loss / chars,)
        #print "TM:",(time.time() - _start)/len(train)
        trainer.status()
| 4,910 | 32.182432 | 102 | py |
dynet | dynet-master/examples/batching/minibatch.py | import dynet as dy
import numpy as np
# Demonstration of dynet's minibatching operations on lookup parameters.
m = dy.Model()
lp = m.add_lookup_parameters((100,10))
# regular lookup
a = lp[1].npvalue()
b = lp[2].npvalue()
c = lp[3].npvalue()
# batch lookup instead of single elements.
# two ways of doing this.
abc1 = dy.lookup_batch(lp, [1,2,3])
print(abc1.npvalue())
abc2 = lp.batch([1,2,3])
print(abc2.npvalue())
# Reference: the three single lookups stacked side by side should match.
print(np.hstack([a,b,c]))
# use pick and pickneglogsoftmax in batch mode
# (must be used in conjunction with lookup_batch):
print("\nPick")
W = dy.parameter( m.add_parameters((5, 10)) )
h = W * lp.batch([1,2,3])
print(h.npvalue())
# pick_batch selects element 1 of batch item 0, element 2 of item 1, etc.
print(dy.pick_batch(h,[1,2,3]).npvalue())
print(dy.pick(W*lp[1],1).value(), dy.pick(W*lp[2],2).value(), dy.pick(W*lp[3],3).value())
# using pickneglogsoftmax_batch
print("\nPick neg log softmax")
# Reference: the full -log softmax matrix, then the batched picked entries.
print((-dy.log(dy.softmax(h))).npvalue())
print(dy.pickneglogsoftmax_batch(h,[1,2,3]).npvalue())
| 875 | 23.333333 | 89 | py |
dynet | dynet-master/examples/sequence-to-sequence/attention.py | import dynet as dy
import random
# Toy character-level seq2seq with attention: learns to reproduce a string.
EOS = "<EOS>"
characters = list("abcdefghijklmnopqrstuvwxyz ")
characters.append(EOS)
int2char = list(characters)
char2int = {c:i for i,c in enumerate(characters)}
VOCAB_SIZE = len(characters)
LSTM_NUM_OF_LAYERS = 2
EMBEDDINGS_SIZE = 32
STATE_SIZE = 32
ATTENTION_SIZE = 32
model = dy.Model()
# Bidirectional encoder: separate forward and backward LSTMs.
enc_fwd_lstm = dy.LSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDINGS_SIZE, STATE_SIZE, model)
enc_bwd_lstm = dy.LSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDINGS_SIZE, STATE_SIZE, model)
# Decoder input = [attention context (2*STATE_SIZE); previous output embedding].
dec_lstm = dy.LSTMBuilder(LSTM_NUM_OF_LAYERS, STATE_SIZE*2+EMBEDDINGS_SIZE, STATE_SIZE, model)
input_lookup = model.add_lookup_parameters((VOCAB_SIZE, EMBEDDINGS_SIZE))
# MLP attention: score = v^T tanh(W1 * h_enc + W2 * s_dec).
attention_w1 = model.add_parameters( (ATTENTION_SIZE, STATE_SIZE*2))
attention_w2 = model.add_parameters( (ATTENTION_SIZE, STATE_SIZE*LSTM_NUM_OF_LAYERS*2))
attention_v = model.add_parameters( (1, ATTENTION_SIZE))
decoder_w = model.add_parameters( (VOCAB_SIZE, STATE_SIZE))
decoder_b = model.add_parameters( (VOCAB_SIZE))
output_lookup = model.add_lookup_parameters((VOCAB_SIZE, EMBEDDINGS_SIZE))
def embed_sentence(sentence):
    """Wrap `sentence` in EOS markers and return its per-character embeddings."""
    global input_lookup
    chars = [EOS] + list(sentence) + [EOS]
    indices = [char2int[c] for c in chars]
    return [input_lookup[i] for i in indices]
def run_lstm(init_state, input_vecs):
    """Feed `input_vecs` through the RNN, collecting the output at every step."""
    outputs = []
    state = init_state
    for vec in input_vecs:
        state = state.add_input(vec)
        outputs.append(state.output())
    return outputs
def encode_sentence(enc_fwd_lstm, enc_bwd_lstm, sentence):
    """BiLSTM-encode an embedded sentence.

    Returns one [fwd; bwd] concatenated vector per input position.
    """
    fwd_states = run_lstm(enc_fwd_lstm.initial_state(), sentence)
    # Run backward over the reversed inputs, then re-align with the forward pass.
    bwd_states = run_lstm(enc_bwd_lstm.initial_state(), list(reversed(sentence)))
    bwd_states = bwd_states[::-1]
    return [dy.concatenate([f, b]) for f, b in zip(fwd_states, bwd_states)]
def attend(input_mat, state, w1dt):
    """MLP attention: return the context vector for the current decoder state.

    `w1dt` is the precomputed W1 * input_mat (cached by the caller since it
    is constant across decoding steps).
    """
    global attention_w2
    global attention_v
    w2 = dy.parameter(attention_w2)
    v = dy.parameter(attention_v)
    # input_mat: (encoder_state x seqlen) => input vecs concatenated as cols
    # w1dt: (attdim x seqlen)
    # w2dt: (attdim x attdim)
    w2dt = w2*dy.concatenate(list(state.s()))
    # att_weights: (seqlen,) row vector
    unnormalized = dy.transpose(v * dy.tanh(dy.colwise_add(w1dt, w2dt)))
    att_weights = dy.softmax(unnormalized)
    # context: (encoder_state)
    context = input_mat * att_weights
    return context
def decode(dec_lstm, vectors, output):
    """Teacher-forced decoding: return the total negative log-likelihood of `output`.

    `vectors` are the encoder outputs; the gold character is fed back at
    each step.
    """
    output = [EOS] + list(output) + [EOS]
    output = [char2int[c] for c in output]
    w = dy.parameter(decoder_w)
    b = dy.parameter(decoder_b)
    w1 = dy.parameter(attention_w1)
    input_mat = dy.concatenate_cols(vectors)
    w1dt = None
    # Decoder starts from an EOS embedding and a zero attention context.
    last_output_embeddings = output_lookup[char2int[EOS]]
    s = dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(STATE_SIZE*2), last_output_embeddings]))
    loss = []
    for char in output:
        # w1dt can be computed and cached once for the entire decoding phase
        w1dt = w1dt or w1 * input_mat
        vector = dy.concatenate([attend(input_mat, s, w1dt), last_output_embeddings])
        s = s.add_input(vector)
        out_vector = w * s.output() + b
        probs = dy.softmax(out_vector)
        # Teacher forcing: feed the gold character to the next step.
        last_output_embeddings = output_lookup[char]
        loss.append(-dy.log(dy.pick(probs, char)))
    loss = dy.esum(loss)
    return loss
def generate(in_seq, enc_fwd_lstm, enc_bwd_lstm, dec_lstm):
    """Greedy decoding: encode in_seq and emit characters until the second
    EOS is produced or 2*len(in_seq) steps have passed.

    Uses the same module globals as decode(); also int2char for output.
    """
    embedded = embed_sentence(in_seq)
    encoded = encode_sentence(enc_fwd_lstm, enc_bwd_lstm, embedded)
    w = dy.parameter(decoder_w)
    b = dy.parameter(decoder_b)
    w1 = dy.parameter(attention_w1)
    input_mat = dy.concatenate_cols(encoded)
    w1dt = None
    last_output_embeddings = output_lookup[char2int[EOS]]
    # Prime the decoder with a zero context vector plus the EOS embedding.
    s = dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(STATE_SIZE * 2), last_output_embeddings]))
    out = ''
    count_EOS = 0
    # Hard cap on the output length to guarantee termination.
    for i in range(len(in_seq)*2):
        if count_EOS == 2: break
        # w1dt can be computed and cached once for the entire decoding phase
        w1dt = w1dt or w1 * input_mat
        vector = dy.concatenate([attend(input_mat, s, w1dt), last_output_embeddings])
        s = s.add_input(vector)
        out_vector = w * s.output() + b
        probs = dy.softmax(out_vector).vec_value()
        # Greedy argmax over the softmax distribution.
        next_char = probs.index(max(probs))
        last_output_embeddings = output_lookup[next_char]
        if int2char[next_char] == EOS:
            count_EOS += 1
            continue
        out += int2char[next_char]
    return out
def get_loss(input_sentence, output_sentence, enc_fwd_lstm, enc_bwd_lstm, dec_lstm):
    """Build a fresh computation graph and return the decoder loss for one
    (input, output) sentence pair."""
    dy.renew_cg()
    encoded = encode_sentence(enc_fwd_lstm, enc_bwd_lstm,
                              embed_sentence(input_sentence))
    return decode(dec_lstm, encoded, output_sentence)
def train(model, sentence):
    """Overfit the seq2seq model to autoencode a single sentence with SGD,
    printing the loss and a greedy sample every 20 iterations.

    NOTE(review): reads enc_fwd_lstm / enc_bwd_lstm / dec_lstm from module
    globals rather than from `model` — confirm they are defined above.
    """
    trainer = dy.SimpleSGDTrainer(model)
    for i in range(600):
        # Input and target are the same sentence (autoencoding objective).
        loss = get_loss(sentence, sentence, enc_fwd_lstm, enc_bwd_lstm, dec_lstm)
        loss_value = loss.value()
        loss.backward()
        trainer.update()
        if i % 20 == 0:
            print(loss_value)
            print(generate(sentence, enc_fwd_lstm, enc_bwd_lstm, dec_lstm))
# Entry point: memorize a single example sentence.
train(model, "it is working")
| 5,302 | 31.533742 | 113 | py |
dynet | dynet-master/examples/xor/xor.py | import sys
import dynet as dy
# Toggle between cross-entropy (sigmoid output, targets 1/0) and squared
# error (linear output, targets 1/-1) training objectives.
#xsent = True
xsent = False
HIDDEN_SIZE = 8
ITERATIONS = 2000
m = dy.Model()
trainer = dy.SimpleSGDTrainer(m)
# One hidden layer MLP: y = V * tanh(W*x + b) + a
W = m.add_parameters((HIDDEN_SIZE, 2))
b = m.add_parameters(HIDDEN_SIZE)
V = m.add_parameters((1, HIDDEN_SIZE))
a = m.add_parameters(1)
# Optionally warm-start the parameters from a text model file.
if len(sys.argv) == 2:
  m.populate_from_textfile(sys.argv[1])
x = dy.vecInput(2)
y = dy.scalarInput(0)
h = dy.tanh((W*x) + b)
if xsent:
    y_pred = dy.logistic((V*h) + a)
    loss = dy.binary_log_loss(y_pred, y)
    T = 1
    F = 0
else:
    y_pred = (V*h) + a
    loss = dy.squared_distance(y_pred, y)
    T = 1
    F = -1
# Train over all four XOR input combinations each epoch.
# NOTE(review): the loop variable `iter` shadows the builtin of the same name.
for iter in range(ITERATIONS):
    mloss = 0.0
    for mi in range(4):
        # Decode mi (0..3) into the two boolean inputs.
        x1 = mi % 2
        x2 = (mi // 2) % 2
        x.set([T if x1 else F, T if x2 else F])
        y.set(T if x1 != x2 else F)
        mloss += loss.scalar_value()
        loss.backward()
        trainer.update()
    mloss /= 4.
    print("loss: %0.9f" % mloss)
x.set([F,T])
z = -(-y_pred)
print(z.scalar_value())
m.save("xor.pymodel")
# Rebuild the graph from scratch and evaluate all four input combinations.
dy.renew_cg()
x = dy.vecInput(2)
y = dy.scalarInput(0)
h = dy.tanh((W*x) + b)
if xsent:
    y_pred = dy.logistic((V*h) + a)
else:
    y_pred = (V*h) + a
x.set([T,F])
print("TF",y_pred.scalar_value())
x.set([F,F])
print("FF",y_pred.scalar_value())
x.set([T,T])
print("TT",y_pred.scalar_value())
x.set([F,T])
print("FT",y_pred.scalar_value())
| 1,367 | 17.739726 | 47 | py |
dynet | dynet-master/python/dynet_config.py | def set(mem="512", random_seed=0, autobatch=0,
        profiling=0, weight_decay=0, shared_parameters=0,
        requested_gpus=0, gpu_mask=None):
    """Store DyNet startup options in __builtins__["__DYNET_CONFIG"] so the
    _dynet extension can read them at import time, merging with any
    previously stored configuration.

    NOTE(review): this treats __builtins__ as a dict; that holds in imported
    modules but not in __main__, where __builtins__ is a module — confirm
    the intended import context.
    """
    if "__DYNET_CONFIG" in __builtins__:
        # NOTE(review): each `stored if stored == new else new` expression
        # always evaluates to the caller's value, so this "merge" is a no-op;
        # presumably the intent was to keep stored values when the caller
        # passed the defaults — confirm against _dynet's import logic.
        # NOTE(review): the unpack target `auto_batch` (with underscore) does
        # not match the `autobatch` name used in the dict below, so the
        # merged autobatch value is silently discarded.
        (mem, random_seed, auto_batch, profiling) = (
            __builtins__["__DYNET_CONFIG"]["mem"] if __builtins__["__DYNET_CONFIG"].get("mem") == mem else mem,
            __builtins__["__DYNET_CONFIG"]["seed"] if __builtins__["__DYNET_CONFIG"].get("seed") == random_seed else random_seed,
            __builtins__["__DYNET_CONFIG"]["autobatch"] if __builtins__["__DYNET_CONFIG"].get("autobatch") == autobatch else autobatch,
            __builtins__["__DYNET_CONFIG"]["profiling"] if __builtins__["__DYNET_CONFIG"].get("profiling") == profiling else profiling)
        (weight_decay, shared_parameters, requested_gpus, gpu_mask) = (
            __builtins__["__DYNET_CONFIG"]["weight_decay"] if __builtins__["__DYNET_CONFIG"].get("weight_decay") == weight_decay else weight_decay,
            __builtins__["__DYNET_CONFIG"]["shared_params"] if __builtins__["__DYNET_CONFIG"].get("shared_params") == shared_parameters else shared_parameters,
            __builtins__["__DYNET_CONFIG"]["requested_gpus"] if __builtins__["__DYNET_CONFIG"].get("requested_gpus") == requested_gpus else requested_gpus,
            __builtins__["__DYNET_CONFIG"]["gpu_mask"] if __builtins__["__DYNET_CONFIG"].get("gpu_mask") == gpu_mask else gpu_mask)
    # TODO read "gpu_mask" from list of IDs?
    __builtins__["__DYNET_CONFIG"] = {
        "mem":mem, "seed": random_seed, "autobatch": autobatch,
        "profiling":profiling, "weight_decay": weight_decay,
        "shared_params": shared_parameters,
        "requested_gpus": requested_gpus,
        "gpu_mask": gpu_mask if gpu_mask else list(),
    }
def set_gpu(flag=True):
    """Record a GPU request flag and ensure requested_gpus=1 in the stored
    configuration (creating the config via the module-level set() if absent).
    """
    __builtins__["__DYNET_GPU"]=flag
    if "__DYNET_CONFIG" in __builtins__:
        __builtins__["__DYNET_CONFIG"]["requested_gpus"] = 1
    else:
        # Calls this module's set(), which shadows the builtin set.
        set(requested_gpus=1)
def gpu():
    """Return the flag stored by set_gpu(), or None if it was never set."""
    if "__DYNET_GPU" in __builtins__:
        return __builtins__["__DYNET_GPU"]
    return None
def get():
    """Return the stored configuration dict, or None if set() was never called."""
    if "__DYNET_CONFIG" in __builtins__:
        return __builtins__["__DYNET_CONFIG"]
    return None
| 2,199 | 51.380952 | 159 | py |
dynet | dynet-master/python/dynet_viz.py | from __future__ import print_function
import sys
import re
from collections import defaultdict
# Python 3 compatibility shims: restore the Python 2 names this module uses.
if sys.version_info.major > 2:
    # alias dict.items() as dict.iteritems() in python 3+
    class compat_dict(defaultdict):
        pass
    compat_dict.iteritems = defaultdict.items
    # Rebind the module-level name so later defaultdict(...) calls get the shim.
    defaultdict = compat_dict
    # add xrange to python 3+
    xrange = range
graphviz_items = []
vindex_count = -1
def new_index():
global vindex_count
vindex_count += 1
return vindex_count
def init(random_seed=None):
    """No-op stand-in for dynet.init(); the viz backend needs no setup."""
    pass
class SimpleConcreteDim(object):
    """A concrete (nrows, ncols) expression dimension.

    `inferred` marks dims deduced from operands rather than declared by the
    user; it is deliberately ignored by equality and hashing.
    """
    def __init__(self, nrows, ncols, inferred):
        self.nrows = nrows
        self.ncols = ncols
        self.inferred = inferred
    def __getitem__(self, key):
        # Index 0 -> rows, 1 -> cols (list semantics preserved).
        return [self.nrows, self.ncols][key]
    def __iter__(self):
        yield self.nrows
        yield self.ncols
    def __str__(self):
        return 'Dim(%s,%s)' % (self.nrows, self.ncols)
    def __eq__(self, other):
        if not isinstance(other, SimpleConcreteDim):
            return False
        return (self.nrows, self.ncols) == (other.nrows, other.ncols)
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # Consistent with __eq__: the inferred flag is excluded.
        return hash((self.nrows, self.ncols))
    def isvalid(self):
        return True
    def invalid(self):
        return False
class InvalidConcreteDim(object):
    """Marker for a dimension mismatch.

    Optionally records the two offending operand dims so the graph label can
    show where the mismatch came from; indexing always yields None.
    """
    def __init__(self, a_dim=None, b_dim=None):
        self.a_dim = a_dim
        self.b_dim = b_dim
    def __getitem__(self, key):
        return None
    def __repr__(self):
        if self.a_dim is None and self.b_dim is None:
            return 'InvalidDim'
        return 'InvalidDim(%s, %s)' % (self.a_dim, self.b_dim)
    def __str__(self):
        return repr(self)
    def isvalid(self):
        return False
    def invalid(self):
        return True
# Shared singleton for mismatches with no recorded operand dims.
InvalidDim = InvalidConcreteDim()
def make_dim(a, b=None, inferred=False):
    """Coerce a dim spec into a dimension object.

    Accepts an InvalidConcreteDim (passed through), a SimpleConcreteDim
    (re-wrapped so `inferred` can differ), a (nrows, ncols) tuple, or
    scalar row/col counts (ints or integral floats). A lone scalar means a
    column vector (ncols=1).
    """
    if isinstance(a, InvalidConcreteDim):
        assert b is None
        return a
    elif isinstance(a, SimpleConcreteDim):
        assert b is None
        return SimpleConcreteDim(a.nrows, a.ncols, inferred)
    elif isinstance(a, tuple):
        assert b is None
        assert len(a) == 2, str(a)
        (nrows, ncols) = a
        return SimpleConcreteDim(nrows, ncols, inferred)
    elif b is None:
        assert isinstance(a, int) or (isinstance(a, float) and int(a) == a)
        return SimpleConcreteDim(a, 1, inferred)
    else:
        assert isinstance(a, int) or (isinstance(a, float) and int(a) == a)
        assert isinstance(b, int) or (isinstance(b, float) and int(b) == b)
        return SimpleConcreteDim(a, b, inferred)
def ensure_freshness(a):
    # Reject expressions created under an older (renewed) computation graph.
    if a.cg_version != _cg.version(): raise ValueError("Attempt to use a stale expression.")
def copy_dim(a):
    # Propagate a's dim, marking the copy as inferred; invalid stays invalid.
    if a.dim.isvalid():
        return make_dim(a.dim, inferred=True)
    else:
        return InvalidDim
def ensure_same_dim(a,b):
    # Elementwise-op dim rule: operands must match exactly.
    if a.dim.invalid() or b.dim.invalid():
        return InvalidDim
    elif a.dim==b.dim:
        return copy_dim(a)
    else:
        return InvalidConcreteDim(a.dim,b.dim)
def ensure_mul_dim(a,b):
    # Matrix-product dim rule: a's cols must equal b's rows.
    if a.dim.invalid() or b.dim.invalid():
        return InvalidDim
    elif a.dim[1]==b.dim[0]:
        return make_dim(a.dim[0], b.dim[1], inferred=True)
    else:
        return InvalidConcreteDim(a.dim,b.dim)
def ensure_all_same_dim(xs):
    # N-ary elementwise rule; first mismatch wins.
    # NOTE(review): assumes xs is non-empty (xs[0] below) — confirm callers.
    for x in xs:
        if x.dim.invalid():
            return InvalidDim
    dim0 = xs[0].dim
    for x in xs[1:]:
        if dim0 != x.dim:
            return InvalidConcreteDim(dim0, x.dim)
    return copy_dim(xs[0])
# Operator-overload constructors used by Expression's dunder methods.
def _add(a, b): return GVExpr('add', [a,b], ensure_same_dim(a,b))
def _mul(a, b): return GVExpr('mul', [a,b], ensure_mul_dim(a,b))
def _neg(a): return GVExpr('neg', [a], copy_dim(a))
def _scalarsub(a, b): return GVExpr('scalarsub', [a,b], copy_dim(b))
def _cadd(a, b): return GVExpr('cadd', [a,b], copy_dim(a))
def _cmul(a, b): return GVExpr('cmul', [a,b], copy_dim(a))
def _cdiv(a, b): return GVExpr('cdiv', [a,b], copy_dim(a))
class Expression(object): #{{{
    """Symbolic node in the visualized graph.

    Records an operation name, its arguments, and a dimension; all value
    accessors return dummies (0.0 / [] / None) since nothing is computed.
    """
    def __init__(self, name, args, dim):
        self.name = name
        self.args = args
        self.dim = dim
        # Unique per-graph index and the graph version for staleness checks.
        self.vindex = new_index()
        self.cg_version = cg().version()
    def cg(self): return cg()
    def get_cg_version(self): return self.cg_version
    def get_vindex(self): return self.vindex
    def __repr__(self): return str(self)
    def __str__(self): return '%s([%s], %s, %s/%s)' % (self.name, ', '.join(map(str,self.args)), self.dim, self.vindex, self.cg_version) #"expression %s/%s" % (self.vindex, self.cg_version)
    def __getitem__(self, i): return lookup(self, i)
    def __getslice__(self, i, j): return None
    # Dummy value accessors mirroring the real dynet Expression API.
    def scalar_value(self, recalculate=False): return 0.0
    def vec_value(self, recalculate=False): return []
    def npvalue(self, recalculate=False): return None
    def value(self, recalculate=False): return None
    def forward(self, recalculate=False): return None
    def set(self, x): pass
    def batch(self, i): return lookup_batch(self, i)
    def zero(self): return self
    def backward(self): pass
    def __add__(self, other):
        if isinstance(self, Expression) and isinstance(other, Expression):
            return _add(self,other)
        elif isinstance(self, (int,float)) or isinstance(other, (int,float)):
            return _cadd(self, other)
        else: raise NotImplementedError('self=%s, other=%s' % (self, other))
    def __mul__(self, other):
        if isinstance(self, Expression) and isinstance(other, Expression):
            return _mul(self,other)
        elif isinstance(self, (int,float)) or isinstance(other, (int,float)):
            return _cmul(self, other)
        else: raise NotImplementedError('self=%s, other=%s' % (self, other))
    # NOTE(review): __div__ is Python-2-only; under Python 3 `/` dispatches
    # to __truediv__, which is not defined here — confirm intended support.
    def __div__(self, other):
        if isinstance(self, Expression) and isinstance(other, (int,float)):
            return _cdiv(self, other)
        else: raise NotImplementedError()
    def __neg__(self): return _neg(self)
    def __sub__(self, other):
        if isinstance(self,Expression) and isinstance(other,Expression):
            return self+(-other)
        elif isinstance(self,(int,float)) and isinstance(other,Expression):
            return _scalarsub(self, other)
        elif isinstance(self,Expression) and isinstance(other,(int, float)):
            return _neg(_scalarsub(other, self))
        else: raise NotImplementedError()
    # No-op parameter-initialization API kept for interface parity.
    def init_row(self, i, row): pass
    def init_from_array(self, *args, **kwargs): pass
    def set_updated(self, *args, **kwargs): pass
def GVExpr(name, args, dim):
    """Create an Expression and record it for graph rendering."""
    e = Expression(name, args, dim)
    graphviz_items.append(e)
    return e
class Model(object):
    """Stand-in for dynet.Model: parameter declarations become recorded
    Expressions; persistence methods are no-ops."""
    def add_parameters(self, dim, scale=0, *args, **kwargs):
        assert(isinstance(dim,(tuple,int)))
        pp = Expression('parameters', [dim], make_dim(dim))
        return pp
    def add_lookup_parameters(self, dim, *args, **kwargs):
        # dim is (vocab_size, embedding_dim); a lookup yields one embedding.
        assert(isinstance(dim, tuple))
        pp = Expression('lookup_parameters', [dim], make_dim(dim[1]))
        return pp
    def save_all(self, fname): pass
    def load_all(self, fname): pass
    def save(self, fname): pass
    def load(self, fname): pass
# Guard token preventing direct ComputationGraph construction.
SECRET = 923148
#_cg = ComputationGraph(SECRET)
def cg_version(): return _cg._cg_version
def renew_cg(immediate_compute=False, check_validity=False): return _cg.renew(immediate_compute, check_validity)
def cg():
    # Accessor for the module-wide singleton graph.
    global _cg
    return _cg
class ComputationGraph(object):
    """Singleton stand-in for dynet's computation graph.

    Only tracks a version number for staleness checks and records declared
    parameters; all forward/backward calls are no-ops returning dummies.
    Construct only via the module singleton (guard == SECRET).
    """
    def __init__(self, guard=0):
        if guard != SECRET: raise RuntimeError("Do not instantiate ComputationGraph directly. Use pydynet.cg()")
        self._cg_version = 0
    def renew(self, immediate_compute=False, check_validity=False):
        """Reset the recorded graph: clear the node list and restart the
        global node counter.

        Fix: the original assigned a local ``vindex_count = -1`` (a dead
        store), so the module-level counter was never actually reset; the
        ``global`` declaration makes the reset take effect.
        """
        global vindex_count
        vindex_count = -1
        del graphviz_items[:]
        return self
    def version(self): return self._cg_version
    def parameters(self, params):
        # Record the parameter expression so it appears in the rendering.
        graphviz_items.append(params)
        return params
    # Dummy evaluation API mirroring the real graph.
    def forward_scalar(self): return 0.0
    def inc_forward_scalar(self): return 0.0
    def forward_vec(self): return []
    def inc_forward_vec(self): return []
    def forward(self): return None
    def inc_forward(self): return None
    def backward(self): return None
# The module-wide singleton graph; always access it via cg().
_cg = ComputationGraph(SECRET)
# }}}
def parameter(p):
    """Record a parameter expression for rendering and return it unchanged."""
    graphviz_items.append(p)
    return p
# Input/lookup/selection constructors: each records a graph node whose dim
# follows the real dynet operation's shape rule.
def scalarInput(s): return GVExpr('scalarInput', [s], make_dim(1, inferred=True))
def vecInput(dim): return GVExpr('vecInput', [dim], make_dim(dim))
def inputVector(v): return GVExpr('inputVector', [v], make_dim(len(v), inferred=True))
def matInput(d1, d2): return GVExpr('matInput', [d1, d2], make_dim(d1, d2))
def inputMatrix(v, d): return GVExpr('inputMatrix', [v, d], make_dim(d, inferred=True))
def lookup(p, index=0, update=True): return GVExpr('lookup', [p, index, update], p.dim)
def lookup_batch(p, indices, update=True): return GVExpr('lookup_batch', [p, indices, update], p.dim)
def pick(a, index=0, dim=0): return GVExpr('pick', [a, index], make_dim(1, inferred=True))
def pick_batch(a, indices, dim=0): return GVExpr('pick_batch', [a, indices], make_dim(len(indices), inferred=True))
def hinge(x, index, m=1.0): return GVExpr('hinge', [x, index, m], copy_dim(x))
def max_dim(a, d=0): return GVExpr('max_dim', [a, d], make_dim(1, inferred=True))
def min_dim(a, d=0): return GVExpr('min_dim', [a, d], make_dim(1, inferred=True))
def nobackprop(x): return GVExpr('nobackprop', [x], copy_dim(x))
def flip_gradient(x): return GVExpr('flip_gradient', [x], copy_dim(x))
# binary-exp: two-operand constructors; most require matching operand dims.
def cdiv(x, y): return GVExpr('cdiv', [x,y], ensure_same_dim(x,y))
def colwise_add(x, y):
    # Adds column vector y to every column of x: row counts must match and
    # y must have exactly one column.
    if x.dim.invalid() or y.dim.invalid():
        d = InvalidDim
    elif x.dim[0] == y.dim[0] and y.dim[1] == 1:
        d = copy_dim(x)
    else:
        d = InvalidConcreteDim(x.dim, y.dim)
    return GVExpr('colwise_add', [x,y], d)
def trace_of_product(x, y): return GVExpr('trace_of_product', [x,y], ensure_same_dim(x,y))
def cmult(x, y): return GVExpr('cmult', [x,y], ensure_same_dim(x,y))
def dot_product(x, y): return GVExpr('dot_product', [x,y], ensure_same_dim(x,y))
def squared_distance(x, y): return GVExpr('squared_distance', [x,y], ensure_same_dim(x,y))
def l1_distance(x, y): return GVExpr('l1_distance', [x,y], ensure_same_dim(x,y))
def binary_log_loss(x, y): return GVExpr('binary_log_loss', [x,y], ensure_same_dim(x,y))
#def conv1d_narrow(x, y):
#    if x.dim.invalid() or y.dim.invalid():
#        d = InvalidDim
#    elif x.dim[0] != y.dim[0]:
#        d = InvalidConcreteDim(x.dim, y.dim)
#    else:
#        d = make_dim(x.dim[0], x.dim[1] - y.dim[1] + 1)
#    return GVExpr('conv1d_narrow', [x,y], d)
#def conv1d_wide(x, y):
#    if x.dim.invalid() or y.dim.invalid():
#        d = InvalidDim
#    elif x.dim[0] != y.dim[0]:
#        d = InvalidConcreteDim(x.dim, y.dim)
#    else:
#        d = make_dim(x.dim[0], x.dim[1] + y.dim[1] - 1)
#    return GVExpr('conv1d_wide', [x,y], d)
def filter1d_narrow(x, y):
    # Narrow 1D filtering: row counts must match; output has
    # x_cols - y_cols + 1 columns.
    if x.dim.invalid() or y.dim.invalid():
        d = InvalidDim
    elif x.dim[0] != y.dim[0]:
        d = InvalidConcreteDim(x.dim, y.dim)
    else:
        d = make_dim(x.dim[0], x.dim[1] - y.dim[1] + 1)
    return GVExpr('filter1d_narrow', [x,y], d)
# unary-exp: elementwise operations preserve their input's dim.
def tanh(x): return GVExpr('tanh', [x], copy_dim(x))
def exp(x): return GVExpr('exp', [x], copy_dim(x))
def square(x): return GVExpr('square', [x], copy_dim(x))
def sqrt(x): return GVExpr('sqrt', [x], copy_dim(x))
def erf(x): return GVExpr('erf', [x], copy_dim(x))
def cube(x): return GVExpr('cube', [x], copy_dim(x))
def log(x): return GVExpr('log', [x], copy_dim(x))
def lgamma(x): return GVExpr('lgamma', [x], copy_dim(x))
def logistic(x): return GVExpr('logistic', [x], copy_dim(x))
def rectify(x): return GVExpr('rectify', [x], copy_dim(x))
def log_softmax(x, restrict=None): return GVExpr('log_softmax', [x,restrict], copy_dim(x))
def softmax(x): return GVExpr('softmax', [x], copy_dim(x))
def softsign(x): return GVExpr('softsign', [x], copy_dim(x))
def pow(x, y): return GVExpr('pow', [x,y], ensure_same_dim(x,y))
def bmin(x, y): return GVExpr('bmin', [x,y], ensure_same_dim(x,y))
def bmax(x, y): return GVExpr('bmax', [x,y], ensure_same_dim(x,y))
def transpose(x): return GVExpr('transpose', [x], make_dim(x.dim[1], x.dim[0]) if x.dim.isvalid() else InvalidDim)
def sum_cols(x): return GVExpr('sum_cols', [x], make_dim(x.dim[0],1) if x.dim.isvalid() else InvalidDim)
def sum_batches(x): return GVExpr('sum_batches', [x], copy_dim(x))
#expr-opt
def fold_rows(x, nrows=2):
    # Folding requires x to have exactly nrows rows; output is one row.
    if x.dim.invalid():
        d = InvalidDim
    elif x.dim[0] != nrows:
        d = InvalidConcreteDim(x.dim, nrows)
    else:
        d = make_dim(1, x.dim[1])
    return GVExpr('fold_rows', [x,nrows], d)
def pairwise_rank_loss(x, y, m=1.0): return GVExpr('pairwise_rank_loss', [x,y,m], ensure_same_dim(x,y))
def poisson_loss(x, y): return GVExpr('poisson_loss', [x,y], copy_dim(x))
def huber_distance(x, y, c=1.345): return GVExpr('huber_distance', [x,y,c], ensure_same_dim(x,y))
#expr-unsigned
def kmax_pooling(x, k, d=1): return GVExpr('kmax_pooling', [x,k,d], make_dim(x.dim[0], k) if x.dim.isvalid() else InvalidDim)
def pickneglogsoftmax(x, v): return GVExpr('pickneglogsoftmax', [x,v], make_dim(1, inferred=True))
def pickneglogsoftmax_batch(x, vs): return GVExpr('pickneglogsoftmax_batch', [x,vs], make_dim(len(vs), inferred=True))
def kmh_ngram(x, n): return GVExpr('kmh_ngram', [x,n], make_dim(x.dim[0], x.dim[1]-n+1) if x.dim.isvalid() else InvalidDim)
def pickrange(x, v, u): return GVExpr('pickrange', [x,v,u], make_dim(u-v, x.dim[1]) if x.dim.isvalid() else InvalidDim)
#expr-float
def noise(x, stddev): return GVExpr('noise', [x,stddev], copy_dim(x))
def dropout(x, p): return GVExpr('dropout', [x,p], copy_dim(x))
def block_dropout(x, p): return GVExpr('block_dropout', [x,p], copy_dim(x))
#expr-dim
def reshape(x, d): return GVExpr('reshape', [x,d], make_dim(d))
def esum(xs): return GVExpr('esum', xs, ensure_all_same_dim(xs))
def average(xs): return GVExpr('average', xs, ensure_all_same_dim(xs))
def emax(xs): return GVExpr('emax', xs, ensure_all_same_dim(xs))
def concatenate_cols(xs):
    """Dim rule for column-wise (horizontal) concatenation: column counts
    add up; row counts must all match or the result dim is invalid.

    Fix: the original accumulated `x.dim[1]` over ALL of `xs` after already
    seeding `ncols` with `xs[0].dim[1]`, double-counting the first operand's
    columns; loop over `xs[1:]` instead (matching the sibling
    `concatenate`).
    """
    if any(x.dim.invalid() for x in xs):
        dim = InvalidDim
    else:
        nrows = xs[0].dim[0]
        ncols = xs[0].dim[1]
        for x in xs[1:]:
            ncols += x.dim[1]
            # A row-count mismatch poisons the result via the -1 sentinel.
            nrows = nrows if nrows == x.dim[0] else -1
        dim = make_dim(nrows, ncols) if nrows >= 0 else InvalidDim
    return GVExpr('concatenate_cols', xs, dim)
def concatenate(xs):
    """Dim rule for row-wise (vertical) concatenation: row counts add up;
    column counts must all match or the result dim is invalid."""
    if any(x.dim.invalid() for x in xs):
        dim = InvalidDim
    else:
        nrows = xs[0].dim[0]
        ncols = xs[0].dim[1]
        for x in xs[1:]:
            nrows += x.dim[0]
            # A column-count mismatch poisons the result via the -1 sentinel.
            ncols = ncols if ncols == x.dim[1] else -1
        dim = make_dim(nrows, ncols) if ncols >= 0 else InvalidDim
    return GVExpr('concatenate', xs, dim)
def affine_transform(xs):
    """Dim rule for b + W1*x1 + W2*x2 + ...: xs is [bias, W1, x1, W2, x2, ...]
    and every product Wi*xi must have the bias's dim."""
    if any(x.dim.invalid() for x in xs):
        dim = InvalidDim
    elif all(ensure_mul_dim(a,b)==xs[0].dim for a,b in zip(xs[1::2],xs[2::2])):
        dim = xs[0].dim
    else:
        dim = InvalidDim
    return GVExpr('affine_transform', xs, dim)
builder_num = -1
def new_builder_num():
global builder_num
builder_num += 1
return builder_num
class _RNNBuilder(object):
    """Base class for RNN builder stand-ins: tracks graph/builder versions
    and hands out RNNState objects.

    NOTE(review): several methods below reference self.thisptr and
    Expression.from_cexpr, which this stub does not define — they appear to
    be dead code carried over from the real C-backed bindings; confirm they
    are never reached through initial_state()/RNNState.
    """
    def set_dropout(self, f): pass
    def disable_dropout(self): pass
    def new_graph(self):
        # Bind this builder to the current graph and give it a fresh id.
        self.cg_version = _cg.version()
        self.builder_version = new_builder_num()
    def start_new_sequence(self, es=None):
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
    def add_input(self, e):
        ensure_freshness(e)
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        return Expression.from_cexpr(self.cg_version, self.thisptr.add_input(e.c()))
    def add_input_to_prev(self, prev, e):
        ensure_freshness(e)
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        return Expression.from_cexpr(self.cg_version, self.thisptr.add_input(prev, e.c()))
    def rewind_one_step(self):
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        self.thisptr.rewind_one_step()
    def back(self):
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        return Expression.from_cexpr(self.cg_version, self.thisptr.back())
    def final_h(self):
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        res = []
        #def CExpression cexp
        cexps = self.thisptr.final_h()
        for cexp in cexps:
            res.append(Expression.from_cexpr(self.cg_version, cexp))
        return res
    def final_s(self):
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        res = []
        #def CExpression cexp
        cexps = self.thisptr.final_s()
        for cexp in cexps:
            res.append(Expression.from_cexpr(self.cg_version, cexp))
        return res
    def get_h(self, i):
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        res = []
        #def CExpression cexp
        cexps = self.thisptr.get_h(i)
        for cexp in cexps:
            res.append(Expression.from_cexpr(self.cg_version, cexp))
        return res
    def get_s(self, i):
        if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.")
        res = []
        #def CExpression cexp
        cexps = self.thisptr.get_s(i)
        for cexp in cexps:
            res.append(Expression.from_cexpr(self.cg_version, cexp))
        return res
    def initial_state(self,vecs=None):
        # Lazily (re)create the initial RNNState when the graph has been renewed.
        if self._init_state is None or self.cg_version != _cg.version():
            self.new_graph()
            if vecs is not None:
                self.start_new_sequence(vecs)
            else:
                self.start_new_sequence()
            self._init_state = RNNState(self, -1)
        return self._init_state
    def initial_state_from_raw_vectors(self,vecs=None):
        # Like initial_state(), but wraps raw vectors in vecInput expressions.
        if self._init_state is None or self.cg_version != _cg.version():
            self.new_graph()
            if vecs is not None:
                es = []
                for v in vecs:
                    e = vecInput(len(v))
                    e.set(v)
                    es.append(e)
                self.start_new_sequence(es)
            else:
                self.start_new_sequence()
            self._init_state = RNNState(self, -1)
        return self._init_state
# Concrete builder stubs: identical apart from whoami(), whose value becomes
# the node label prefix in the rendered graph.
class SimpleRNNBuilder(_RNNBuilder):
    def __init__(self, layers, input_dim, hidden_dim, model):
        self.cg_version = -1
        self.layers = layers
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.model = model
        self._init_state = None
        self.builder_version = new_builder_num()
    def whoami(self): return "SimpleRNNBuilder"
class GRUBuilder(_RNNBuilder):
    def __init__(self, layers, input_dim, hidden_dim, model):
        self.cg_version = -1
        self.layers = layers
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.model = model
        self._init_state = None
        self.builder_version = new_builder_num()
    def whoami(self): return "GRUBuilder"
class LSTMBuilder(_RNNBuilder):
    def __init__(self, layers, input_dim, hidden_dim, model):
        self.cg_version = -1
        self.layers = layers
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.model = model
        self._init_state = None
        self.builder_version = new_builder_num()
    def whoami(self): return "LSTMBuilder"
class FastLSTMBuilder(_RNNBuilder):
    def __init__(self, layers, input_dim, hidden_dim, model):
        self.cg_version = -1
        self.layers = layers
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.model = model
        self._init_state = None
        self.builder_version = new_builder_num()
    def whoami(self): return "FastLSTMBuilder"
class BiRNNBuilder(object):
    """
    Builder for BiRNNs that delegates to regular RNNs and wires them together.
    builder = BiRNNBuilder(1, 128, 100, model, LSTMBuilder)
    [o1,o2,o3] = builder.transduce([i1,i2,i3])
    """
    def __init__(self, num_layers, input_dim, hidden_dim, model, rnn_builder_factory):
        """
        @param num_layers: depth of the BiRNN
        @param input_dim: size of the inputs
        @param hidden_dim: size of the outputs (and intermediate layer representations)
        @param model
        @param rnn_builder_factory: RNNBuilder subclass, e.g. LSTMBuilder
        """
        assert num_layers > 0
        assert hidden_dim % 2 == 0
        self.builder_layers = []
        # Each direction gets half the hidden size. Fix: use floor division
        # so the size stays an int under Python 3 (the original `/ 2` yields
        # a float there).
        f = rnn_builder_factory(1, input_dim, hidden_dim // 2, model)
        b = rnn_builder_factory(1, input_dim, hidden_dim // 2, model)
        self.builder_layers.append((f, b))
        # Deeper layers consume the previous layer's concatenated output.
        for _ in range(num_layers - 1):
            f = rnn_builder_factory(1, hidden_dim, hidden_dim // 2, model)
            b = rnn_builder_factory(1, hidden_dim, hidden_dim // 2, model)
            self.builder_layers.append((f, b))
    def whoami(self): return "BiRNNBuilder"
    def set_dropout(self, p):
        # Forward the dropout setting to every directional sub-builder.
        for (fb,bb) in self.builder_layers:
            fb.set_dropout(p)
            bb.set_dropout(p)
    def disable_dropout(self):
        for (fb,bb) in self.builder_layers:
            fb.disable_dropout()
            bb.disable_dropout()
    def add_inputs(self, es):
        """
        returns the list of state pairs (stateF, stateB) obtained by adding
        inputs to both forward (stateF) and backward (stateB) RNNs.
        @param es: a list of Expression
        see also transduce(xs)
        .transduce(xs) is different from .add_inputs(xs) in the following way:
        .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be
        queried in various ways. In particular, they allow access to the previous
        state, as well as to the state-vectors (h() and s() )
        .transduce(xs) returns a list of Expression. These are just the output
        expressions. For many cases, this suffices.
        transduce is much more memory efficient than add_inputs.
        """
        for e in es:
            ensure_freshness(e)
        # All layers but the last are transduced; the last returns state pairs.
        for (fb,bb) in self.builder_layers[:-1]:
            fs = fb.initial_state().transduce(es)
            bs = bb.initial_state().transduce(reversed(es))
            es = [concatenate([f,b]) for f,b in zip(fs, reversed(bs))]
        (fb,bb) = self.builder_layers[-1]
        fs = fb.initial_state().add_inputs(es)
        bs = bb.initial_state().add_inputs(reversed(es))
        return [(f,b) for f,b in zip(fs, reversed(bs))]
    def transduce(self, es):
        """
        returns the list of output Expressions obtained by adding the given inputs
        to the current state, one by one, to both the forward and backward RNNs,
        and concatenating.
        @param es: a list of Expression
        see also add_inputs(xs)
        .transduce(xs) is different from .add_inputs(xs) in the following way:
        .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be
        queried in various ways. In particular, they allow access to the previous
        state, as well as to the state-vectors (h() and s() )
        .transduce(xs) returns a list of Expression. These are just the output
        expressions. For many cases, this suffices.
        transduce is much more memory efficient than add_inputs.
        """
        for e in es:
            ensure_freshness(e)
        for (fb,bb) in self.builder_layers:
            fs = fb.initial_state().transduce(es)
            bs = bb.initial_state().transduce(reversed(es))
            es = [concatenate([f,b]) for f,b in zip(fs, reversed(bs))]
        return es
class RNNState(object): # {{{
    """Immutable step in an unrolled RNN: each add_input() records a graph
    node and returns a new state linked to its predecessor."""
    def __init__(self, builder, state_idx=-1, prev_state=None, out=None):
        self.builder = builder
        self.state_idx=state_idx
        self._prev = prev_state
        self._out = out
    def add_input(self, x): # x: Expression
        # The recorded input dim is x's dim if it matches the builder's
        # declared input_dim, otherwise a mismatch marker.
        input_dim = make_dim(self.builder.input_dim)
        input_dim = x.dim if x.dim==input_dim else InvalidConcreteDim(x.dim, input_dim)
        rnn_type = self.builder.whoami()
        if rnn_type.endswith("Builder"): rnn_type = rnn_type[:-len("Builder")]
        output_e = GVExpr('RNNState', [x, input_dim, rnn_type, self.builder.builder_version, self.state_idx+1], dim=make_dim(self.builder.hidden_dim))
        new_state = RNNState(self.builder, self.state_idx+1, self, output_e)
        return new_state
    def add_inputs(self, xs):
        """Feed each x in xs in turn, returning the list of resulting states."""
        # Starting from the initial state begins a new unrolled instance.
        if self._prev is None:
            self.builder.builder_version = new_builder_num()
        states = []
        cur = self
        for x in xs:
            cur = cur.add_input(x)
            states.append(cur)
        return states
    def transduce(self, xs):
        """Like add_inputs, but return only the per-step output expressions."""
        return [x.output() for x in self.add_inputs(xs)]
    def output(self): return self._out
    def prev(self): return self._prev
    def b(self): return self.builder
    def get_state_idx(self): return self.state_idx
# StackedRNNState TODO: do at least minimal testing for this #{{{
class StackedRNNState(object):
    """State of a stack of RNNs: each layer's output feeds the next layer's
    input at every step."""
    #def list states
    #def StackedRNNState prev
    def __init__(self, states, prev=None):
        self.states = states
        # NOTE(review): this instance attribute shadows the prev() method
        # below, making it uncallable on instances — confirm intent.
        self.prev = prev
    def add_input(self, x):
        #def next_states
        next_states = []
        for s in self.states:
            next_states.append(s.add_input(x))
            # Chain layers: this layer's output is the next layer's input.
            x = next_states[-1].output()
        return StackedRNNState(next_states, self)
    def output(self): return self.states[-1].output()
    def prev(self): return self.prev
    def h(self): return [s.h() for s in self.states]
    def s(self): return [s.s() for s in self.states]
    def add_inputs(self, xs):
        """
        returns the list of states obtained by adding the given inputs
        to the current state, one by one.
        """
        states = []
        cur = self
        for x in xs:
            cur = cur.add_input(x)
            states.append(cur)
        return states
class Trainer(object):
    """Base class for the no-op trainer stand-ins; every method is a stub
    that performs no work and returns None."""
    def update(self, s=1.0):
        pass
    def update_epoch(self, r=1.0):
        pass
    def status(self):
        pass
    def set_clip_threshold(self, thr):
        pass
    def get_clip_threshold(self):
        pass
class SimpleSGDTrainer(Trainer):
    """No-op stand-in for dynet.SimpleSGDTrainer (plain SGD, learning rate e0)."""
    def __init__(self, m, e0 = 0.1, *args): pass
class MomentumSGDTrainer(Trainer):
    # SGD with momentum `mom`.
    def __init__(self, m, e0 = 0.01, mom = 0.9, *args): pass
class AdagradTrainer(Trainer):
    # Adagrad with epsilon smoothing term.
    def __init__(self, m, e0 = 0.1, eps = 1e-20, *args): pass
class AdadeltaTrainer(Trainer):
    # Adadelta with decay rate rho.
    def __init__(self, m, eps = 1e-6, rho = 0.95, *args): pass
class AdamTrainer(Trainer):
    # Adam with the usual alpha/beta_1/beta_2/eps hyperparameters.
    def __init__(self, m, alpha = 0.001, beta_1 = 0.9, beta_2 = 0.999, eps = 1e-8, *args ): pass
# No-op stand-ins for dynet's parameter initializers; constructors accept the
# real API's arguments and discard them.
class Initializer(object): pass
class NormalInitializer(Initializer):
    def __init__(self, mean=0, var=1): pass
class UniformInitializer(Initializer):
    def __init__(self, scale): pass
class ConstInitializer(Initializer):
    def __init__(self, c): pass
class GlorotInitializer(Initializer):
    def __init__(self, is_lookup=False): pass
class FromFileInitializer(Initializer):
    def __init__(self, fname): pass
class NumpyInitializer(Initializer):
    def __init__(self, array): pass
def shape_str(e_dim):
    """Render a dim for graph labels.

    Inferred dims use single braces ('{r}' / '{r,c}'), declared dims double
    braces ('{{r}}' / '{{r,c}}'); a column count of 1 is omitted. Invalid
    dims render via their own str().
    """
    if e_dim.invalid():
        return str(e_dim)
    rows, cols = e_dim[0], e_dim[1]
    inner = '%s' % (rows,) if cols == 1 else '%s,%s' % (rows, cols)
    return '{%s}' % inner if e_dim.inferred else '{{%s}}' % inner
class GVNode(object):
    """Record for one rendered graph node: identity, dims, label, graph
    children, extra feature text, a sort category, and an optional
    user-facing expression name."""
    def __init__(self, name, input_dim, label, output_dim, children, features, node_type, expr_name):
        self.name = name
        self.input_dim = input_dim
        self.label = label
        self.output_dim = output_dim
        self.children = children
        self.features = features
        self.node_type = node_type
        self.expr_name = expr_name
    def __iter__(self):
        # Field order matters: callers unpack nodes positionally.
        return iter((self.name, self.input_dim, self.label, self.output_dim,
                     self.children, self.features, self.node_type, self.expr_name))
    def __repr__(self):
        return 'GVNode(%s)' % ', '.join(str(field) for field in self)
    def __str__(self):
        return repr(self)
    def __lt__(self, other):
        # Arbitrary-but-stable ordering so nodes can be sorted.
        return id(self) < id(other)
def make_network_graph(compact, expression_names, lookup_names):
"""
Make a network graph, represented as of nodes and a set of edges.
The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
# The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
"""
nodes = set()
# edges = defaultdict(set) # parent -> (child, extra)
var_name_dict = dict()
if expression_names:
for e in graphviz_items: # e: Expression
if e in expression_names:
var_name_dict[e.vindex] = expression_names[e]
rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('A')))
def vidx2str(vidx): return '%s%s' % ('N', vidx)
for e in graphviz_items: # e: Expression
vidx = e.vindex
f_name = e.name
args = e.args
output_dim = e.dim
input_dim = None # basically just RNNStates use this since everything else has input_dim==output_dim
children = set()
node_type = '2_regular'
if f_name == 'vecInput':
[_dim] = args
arg_strs = []
elif f_name == 'inputVector':
[_v] = args
arg_strs = []
elif f_name == 'matInput':
[_d1, _d2] = args
arg_strs = []
elif f_name == 'inputMatrix':
[_v, _d] = args
arg_strs = []
elif f_name == 'parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup_parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup':
[p, idx, update] = args
[_dim] = p.args
if vidx in var_name_dict:
name = var_name_dict[vidx]
else:
name = None
item_name = None
if lookup_names and p in expression_names:
param_name = expression_names[p]
if param_name in lookup_names:
item_name = '\\"%s\\"' % (lookup_names[param_name][idx],)
if compact:
if item_name is not None:
f_name = item_name
elif name is not None:
f_name = '%s[%s]' % (name, idx)
else:
f_name = 'lookup(%s)' % (idx)
arg_strs = []
else:
arg_strs = [var_name_dict.get(p.vindex, 'v%d' % (p.vindex))]
if item_name is not None:
arg_strs.append(item_name)
vocab_size = _dim[0]
arg_strs.extend(['%s' % (idx), '%s' % (vocab_size), 'update' if update else 'fixed'])
#children.add(vidx2str(p.vindex))
#node_type = '1_param'
elif f_name == 'RNNState':
[arg, input_dim, bldr_type, bldr_num, state_idx] = args # arg==input_e
rnn_name = rnn_bldr_name[bldr_num]
if bldr_type.endswith('Builder'):
bldr_type[:-len('Builder')]
f_name = '%s-%s-%s' % (bldr_type, rnn_name, state_idx)
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs = [s]
else:
arg_strs = []
children.add(vidx2str(arg.vindex))
node_type = '3_rnn_state'
else:
arg_strs = []
for arg in args:
if isinstance(arg, Expression):
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs.append(s)
children.add(vidx2str(arg.vindex))
elif isinstance(arg, float) and compact:
s = re.sub('0+$', '', '%.3f' % (arg))
if s == '0.':
s = str(arg)
arg_strs.append(s)
else:
arg_strs.append(str(arg))
# f_name = { ,
# }.get(f_name, f_name)
if compact:
f_name = { 'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'cadd': '+',
'cmul': '*',
'cdiv': '/',
'scalarsub': '-',
'concatenate': 'cat',
'esum': 'sum',
'emax': 'max',
'emin': 'min',
}.get(f_name, f_name)
if arg_strs:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
elif f_name == 'add':
[a,b] = arg_strs
str_repr = '%s + %s' % (a,b)
elif f_name == 'sub':
[a,b] = arg_strs
str_repr = '%s - %s' % (a,b)
elif f_name == 'mul':
[a,b] = arg_strs
str_repr = '%s * %s' % (a,b)
elif f_name == 'div':
[a,b] = arg_strs
str_repr = '%s / %s' % (a,b)
elif f_name == 'neg':
[a,] = arg_strs
str_repr = '-%s' % (a)
elif f_name == 'affine_transform':
str_repr = arg_strs[0]
for i in xrange(1, len(arg_strs), 2):
str_repr += ' + %s*%s' % tuple(arg_strs[i:i+2])
else:
if arg_strs is not None:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
name = vidx2str(vidx)
var_name = '%s' % (var_name_dict.get(vidx, 'v%d' % (vidx))) if not compact else ''
# if show_dims:
# str_repr = '%s\\n%s' % (shape_str(e.dim), str_repr)
label = str_repr
if not compact:
label = '%s = %s' % (var_name, label)
features = ''
# if output_dim.invalid():
# features += " [color=red,style=filled,fillcolor=red]"
# node_def_lines.append(' %s [label="%s%s"] %s;' % (vidx2str(vidx), label_prefix, str_repr, ''))
expr_name = expression_names[e] if compact and expression_names and (e in expression_names) and (expression_names[e] != f_name) else None
nodes.add(GVNode(name, input_dim, label, output_dim, frozenset(children), features, node_type, expr_name))
return nodes
def parents_of(n, nodes):
    """Return the nodes in ``nodes`` that have ``n`` as a child.

    A node ``p`` is a parent of ``n`` when ``n``'s name appears in
    ``p.children`` (children are stored as a set of node-name strings,
    see make_network_graph).

    The original implementation shadowed the parameter ``n`` with the loop
    variable, referenced ``.children`` on a name string, and never actually
    called ``ps.append`` (bare attribute access), so it always returned an
    empty list or raised. This version implements the evident intent.
    """
    target = n.name
    return [p for p in nodes if target in p.children]
def collapse_birnn_states(nodes, compact):
    """Merge forward/backward RNN state nodes that share input and output.

    When two or more '3_rnn_state' nodes read from the same input expression
    and feed the same output expression (the typical BiRNN pattern), they are
    collapsed into a single group node whose output dim is doubled (the
    concatenation of both directions). Returns ``(new_nodes, rnn_groups)``
    where ``rnn_groups`` maps each replaced state-node name to its group
    node's name.

    NOTE: this module is Python 2 code (``iteritems`` below, ``xrange``
    elsewhere in the file).
    """
    # Index nodes by name for O(1) lookups during the rewrite.
    node_info = {n.name:n for n in nodes}
    new_nodes = []
    children_forwards = dict() # if `n.children` is pointing to K, return V instead
    rnn_state_nodes = []
    rnn_parents = defaultdict(set) # rnn_state_node -> [parent_expression]
    rnn_children = {} # rnn_state_node -> [child_expression]
    shared_rnn_states = defaultdict(set) # (input name, output name) -> [(rnn state name)]
    rnn_groups = dict() # these nodes (keys) are being replaced by the new group nodes (values)
    nodes_to_delete = set()
    # Pass 1: record, for every RNN state node, who feeds it and whom it feeds.
    for n in nodes:
        for c in n.children:
            if node_info[c].node_type == '3_rnn_state':
                rnn_parents[node_info[c].name].add(n.name)
        if n.node_type == '3_rnn_state':
            rnn_state_nodes.append(n)
            rnn_children[n.name] = set(node_info[c].name for c in n.children)
    # Pass 2: group RNN states by their (unique input, unique output) pair.
    # The trailing-comma unpacking asserts exactly one child/parent each.
    for n in rnn_state_nodes:
        in_e, = rnn_children[n.name]
        out_e, = rnn_parents[n.name]
        shared_rnn_states[(in_e, out_e)].add(n)
    # Pass 3: for each shareable group, synthesize one merged node.
    for ((in_e, out_e), ns) in shared_rnn_states.iteritems():
        input_dims = set(n.input_dim for n in ns)
        output_dims = set(n.output_dim for n in ns)
        # Only collapse when the states agree on both dimensions.
        if len(ns) > 1 and len(input_dims)==1 and len(output_dims)==1:
            input_dim, = input_dims
            output_dim, = output_dims
            new_rnn_group_state_name = ''.join(n.name for n in sorted(ns))
            new_rnn_group_state_label = '\\n'.join(n.label for n in sorted(ns))
            if not compact:
                new_rnn_group_state_label = '%s\\n%s' % (node_info[out_e].label, new_rnn_group_state_label)
            # Doubled first dimension: concatenation of the two directions.
            cat_output_dim = make_dim(output_dim[0]*2, output_dim[1])
            new_rnn_group_state = GVNode(new_rnn_group_state_name, input_dim, new_rnn_group_state_label, cat_output_dim, frozenset([in_e]), '', '3_rnn_state', node_info[out_e].expr_name)
            for n in ns:
                rnn_groups[n.name] = new_rnn_group_state.name
                # children_forwards[n.name] = new_rnn_group_state.name
                nodes_to_delete.add(n.name)
            # Edges that pointed at the (deleted) output expression are
            # redirected to the new group node.
            children_forwards[out_e] = new_rnn_group_state.name
            nodes.add(new_rnn_group_state)
            nodes_to_delete.add(out_e)
    # TODO: WHEN WE DELETE A CAT NODE, MAKE SURE WE FORWARD TO THE **NEW GROUP STATE NODE**
    # Pass 4: rebuild the node list, dropping deleted nodes and chasing
    # forwarding chains so child references stay valid.
    for (name, input_dim, label, output_dim, children, features, node_type, expr_name) in nodes:
        if name not in nodes_to_delete:
            new_children = []
            for c in children:
                while c in children_forwards:
                    c = children_forwards[c]
                new_children.append(c)
            new_nodes.append(GVNode(name, input_dim, label, output_dim, new_children, features, node_type, expr_name))
    return (new_nodes, rnn_groups)
def print_graphviz(compact=False, show_dims=True, expression_names=None, lookup_names=None, collapse_birnns=False):
    """Print the current computation graph in Graphviz DOT format to stdout.

    :param compact: use short operator labels (e.g. '+' instead of 'add')
    :param show_dims: prepend tensor shapes to each node label
    :param expression_names: optional {Expression: name} for labeling
    :param lookup_names: optional mapping for pretty lookup-item labels
    :param collapse_birnns: merge paired forward/backward RNN state nodes
    """
    original_nodes = make_network_graph(compact, expression_names, lookup_names)
    nodes = original_nodes
    collapse_to = dict()
    if collapse_birnns:
        (nodes, birnn_collapse_to) = collapse_birnn_states(nodes, compact)
        collapse_to.update(birnn_collapse_to)
    # DOT header; BT rank direction draws inputs at the bottom.
    print('digraph G {')
    print(' rankdir=BT;')
    if not compact: print(' nodesep=.05;')
    # Emit one `node [shape=...]` directive per node category.
    node_types = defaultdict(set)
    for n in nodes:
        node_types[n.node_type].add(n.name)
    for node_type in sorted(node_types):
        style = {
            '1_param': '[shape=ellipse]',
            '2_regular': '[shape=rect]',
            '3_rnn_state': '[shape=rect, peripheries=2]',
        }[node_type]
        print(' node %s; ' % (style), ' '.join(node_types[node_type]))
    # all_nodes = set(line.strip().split()[0] for line in node_def_lines)
    # Emit each node definition and its incoming edges.
    for n in nodes:
        label = n.label
        if show_dims:
            if n.expr_name is not None:
                label = '%s\\n%s' % (n.expr_name, label)
            label = '%s\\n%s' % (shape_str(n.output_dim), label)
            if n.input_dim is not None:
                label = '%s\\n%s' % (label, shape_str(n.input_dim))
            # Highlight dimension mismatches in red.
            # NOTE(review): GVNode looks tuple-like elsewhere in this module;
            # in-place `+=` on an attribute may fail if it is a namedtuple --
            # confirm GVNode's definition.
            if n.output_dim.invalid() or (n.input_dim is not None and n.input_dim.invalid()):
                n.features += " [color=red,style=filled,fillcolor=red]"
        print(' %s [label="%s"] %s;' % (n.name, label, n.features))
        for c in n.children:
            print(' %s -> %s;' % (c, n.name))
    # Add dotted edges between consecutive states of the same RNN so the
    # layout keeps each RNN's timeline in order.
    rnn_states = [] # (name, rnn_name, state_idx)
    rnn_state_re = re.compile("[^-]+-(.)-(\\d+)")
    for n in original_nodes:
        if n.node_type == '3_rnn_state':
            m = rnn_state_re.search(n.label)
            assert m is not None, 'rnn_state_re.search(%s); %s' % (n.label, n)
            (rnn_name, state_idx) = m.groups()
            rnn_states.append((rnn_name, int(state_idx), n.name))
    rnn_states = sorted(rnn_states)
    edges = set()
    for ((rnn_name_p, state_idx_p, name_p), (rnn_name_n, state_idx_n, name_n)) in zip(rnn_states,rnn_states[1:]):
        if rnn_name_p == rnn_name_n:
            if state_idx_p+1 == state_idx_n:
                # Map collapsed state names onto their group nodes.
                group_name_p = collapse_to.get(name_p, name_p)
                group_name_n = collapse_to.get(name_n, name_n)
                edges.add((group_name_p, group_name_n))
    for (name_p, name_n) in edges:
        print(' %s -> %s [style=dotted];' % (name_p, name_n)) # ,dir=both
    print('}')
| 39,058 | 35.640713 | 188 | py |
dynet | dynet-master/python/model_test.py | """
Tests for model saving and loading, including for user-defined models.
"""
from __future__ import print_function
import dynet as dy
import numpy
import os
# first, define three user-defined classes
class Transfer(Saveable):
    """A single affine transform plus activation: act(W*x + b).

    Registers its weight and bias with the given model and implements the
    Saveable component protocol so it can be serialized with the model.
    """
    def __init__(self, nin, nout, act, model):
        # Record layer geometry and nonlinearity, then register parameters.
        self.nin = nin
        self.nout = nout
        self.act = act
        self.W = model.add_parameters((nout, nin))
        self.b = model.add_parameters(nout)
    def __call__(self, x):
        # Promote stored parameters to graph expressions, then apply the layer.
        W = dy.parameter(self.W)
        b = dy.parameter(self.b)
        return self.act(W * x + b)
    def get_components(self):
        # Saveable protocol: pieces that must be serialized.
        return [self.W, self.b]
    def restore_components(self, components):
        # Saveable protocol: reinstall previously saved pieces.
        self.W, self.b = components
class MultiTransfer(Saveable):
    """A stack of Transfer layers applied in sequence.

    ``sizes`` gives the layer widths; one Transfer is built per consecutive
    (in, out) pair, all sharing the same activation.
    """
    def __init__(self, sizes, act, model):
        self.transfers = [Transfer(nin, nout, act, model)
                          for nin, nout in zip(sizes, sizes[1:])]
    def __call__(self, x):
        # Thread the input through each layer in order.
        out = x
        for layer in self.transfers:
            out = layer(out)
        return out
    def get_components(self):
        return self.transfers
    def restore_components(self, components):
        self.transfers = components
class NoParameters(Saveable):
    # A Saveable component that holds no parameters -- only an activation
    # function. Exists to test saving/loading of parameter-free components.
    def __init__(self, act):
        self.act = act
    def __call__(self, in_expr):
        # NOTE(review): dy.cmult is elementwise multiplication of TWO
        # expressions; calling it with a single argument looks wrong --
        # confirm whether this should be self.act(in_expr).
        return self.act(dy.cmult(in_expr))
    def get_components(self): return []
    def restore_components(self,components):pass
def old_style_save_and_load():
    """Exercise the legacy whole-model save/load API.

    Builds a model with parameters, user-defined Saveables and LSTM builders,
    saves it, rebuilds an identically-structured second model, and checks that
    parameters differ before load and match after load. Cleans up the file.
    """
    # create a model and add parameters.
    m = dy.Model()
    a = m.add_parameters((100,100))
    b = m.add_lookup_parameters((20,2))
    t1 = Transfer(5,6,dy.softmax, m)
    t2 = Transfer(7,8,dy.softmax, m)
    tt = MultiTransfer([10,10,10,10],dy.tanh, m)
    c = m.add_parameters((100))
    lb = dy.LSTMBuilder(1,2,3,m)
    # NOTE(review): lb2 is reassigned below for the second model, so this
    # builder is shadowed and never checked -- presumably intentional padding
    # so both models register the same parameters; verify.
    lb2 = dy.LSTMBuilder(2,4,4,m)
    # save
    m.save("test1")
    # create new model (same parameters):
    m2 = dy.Model()
    a2 = m2.add_parameters((100,100))
    b2 = m2.add_lookup_parameters((20,2))
    t12 = Transfer(5,6,dy.softmax, m2)
    t22 = Transfer(7,8,dy.softmax, m2)
    tt2 = MultiTransfer([10,10,10,10],dy.tanh, m2)
    c2 = m2.add_parameters((100))
    lb2 = dy.LSTMBuilder(1,2,3,m2)
    lb22 = dy.LSTMBuilder(2,4,4,m2)
    # parameters should be different
    for p1,p2 in [(a,a2),(b,b2),(c,c2),(t1.W,t12.W),(tt.transfers[0].W,tt2.transfers[0].W)]:
        assert(not numpy.array_equal(p1.as_array(), p2.as_array()))
    m2.load("test1")
    # parameters should be same
    for p1,p2 in [(a,a2),(b,b2),(c,c2),(t1.W,t12.W),(tt.transfers[0].W,tt2.transfers[0].W)]:
        assert(numpy.array_equal(p1.as_array(), p2.as_array()))
    os.remove("test1")
old_style_save_and_load()
def new_style_save_and_load():
    """Exercise the object-list save/load API.

    Saves explicit lists of parameters/builders/Saveables (in two orders and
    as a partial subset), reloads them into fresh models, and checks type
    preservation and value equality across every load path. Cleans up files.
    """
    # create a model and add parameters.
    m = dy.Model()
    a = m.add_parameters((100,100))
    b = m.add_lookup_parameters((20,2))
    t1 = Transfer(5,6,dy.softmax, m)
    t2 = Transfer(7,8,dy.softmax, m)
    tt = MultiTransfer([10,10,10,10],dy.tanh, m)
    c = m.add_parameters((100))
    lb = dy.LSTMBuilder(1,2,3,m)
    lb2 = dy.LSTMBuilder(2,4,4,m)
    np = NoParameters(dy.tanh)
    # save: same objects in forward and reversed order.
    m.save("test_new",[a,b,t1,t2,tt,c,lb,lb2,np])
    m.save("test_new_r",[np,lb2,lb,c,tt,t2,t1,b,a])
    # create new model and load:
    m2 = dy.Model()
    [xa,xb,xt1,xt2,xtt,xc,xlb,xlb2,xnp] = m2.load("test_new")
    #m3 = dy.Model()
    #[rnp,rlb2,rlb,rc,rtt,rt2,rt1,rb,ra] = m3.load("test_new_r")
    m3,[rnp,rlb2,rlb,rc,rtt,rt2,rt1,rb,ra] = dy.Model.from_file("test_new_r")
    # partial save and load:
    m.save("test_new_partial", [a,tt,lb2])
    m4 = dy.Model()
    [pa,ptt,plb2] = m4.load("test_new_partial")
    # types: each load path must reconstruct the original Python type.
    params = [a,xa,ra,pa,c,xc,rc]
    for p1 in params:
        assert(isinstance(p1,dy.Parameters))
    for p1 in [b,xb,rb]:
        assert(isinstance(p1,dy.LookupParameters))
    for p1 in [lb,lb2,xlb,xlb2,rlb,rlb2,plb2]:
        assert(isinstance(p1,dy.LSTMBuilder))
    for p1 in [t1,t2,xt1,xt2,rt1,rt2]:
        assert(isinstance(p1,Transfer))
    for p1 in [tt,xtt,rtt,ptt]:
        assert(isinstance(p1,MultiTransfer))
    for p1 in [np,xnp,rnp]:
        assert(isinstance(p1,NoParameters))
    # param equalities: all copies of the same parameter must match pairwise.
    for p1 in [a,xa,ra,pa]:
        for p2 in [a,xa,ra,pa]:
            assert(numpy.array_equal(p1.as_array(),p2.as_array()))
    for p1 in [c,xc,rc]:
        for p2 in [c,xc,rc]:
            assert(numpy.array_equal(p1.as_array(),p2.as_array()))
    for p1 in [b,xb,rb]:
        for p2 in [b,xb,rb]:
            assert(numpy.array_equal(p1.as_array(),p2.as_array()))
    v1 = b[4]
    v2 = xb[4]
    v3 = rb[4]
    assert(numpy.array_equal(v1.value(), v2.value()))
    assert(numpy.array_equal(v1.value(), v3.value()))
    # lstm builders equalities: loaded builders must produce identical outputs.
    s1 = lb.initial_state()
    s2 = xlb.initial_state()
    s3 = rlb.initial_state()
    y1 = s1.add_input(v1).output().value()
    y2 = s2.add_input(v1).output().value()
    y3 = s3.add_input(v1).output().value()
    for y in [y2,y3]:
        assert(numpy.array_equal(y1,y))
    # Transfer equalities
    for p1 in [t1,xt1,rt1]:
        for p2 in [t1,xt1,rt1]:
            assert(numpy.array_equal(p1.W.as_array(),p2.W.as_array()))
            assert(numpy.array_equal(p1.b.as_array(),p2.b.as_array()))
            assert(p1.nin == p2.nin)
    # MultiTransfer equalities (first and last layers checked)
    for p1 in [tt,xtt,rtt]:
        for p2 in [tt,xtt,rtt]:
            assert(numpy.array_equal(p1.transfers[0].W.as_array(),p2.transfers[0].W.as_array()))
            assert(numpy.array_equal(p1.transfers[0].b.as_array(),p2.transfers[0].b.as_array()))
            assert(numpy.array_equal(p1.transfers[-1].W.as_array(),p2.transfers[-1].W.as_array()))
            assert(numpy.array_equal(p1.transfers[-1].b.as_array(),p2.transfers[-1].b.as_array()))
            assert(p1.transfers[0].nin == p2.transfers[0].nin)
            assert(p1.transfers[-1].nin == p2.transfers[-1].nin)
    # NoParameter equalities
    for p1 in [np,xnp,rnp]:
        assert(p1.act == dy.tanh)
    # Remove the data file plus its .pyk/.pym sidecar files.
    for suf in ['','.pyk','.pym']:
        os.remove("test_new"+suf)
        os.remove("test_new_r"+suf)
        os.remove("test_new_partial"+suf)
new_style_save_and_load()
print("Model saving tests passed.")
| 6,174 | 30.829897 | 98 | py |
dynet | dynet-master/tests/python/test.py | import dynet as dy
import numpy as np
import unittest
import gc
def npvalue_callable(x):
    # Indirection so assertRaises can invoke Expression.npvalue lazily.
    return x.npvalue()
def gradient_callable(x):
    # Indirection so assertRaises can invoke Expression.gradient lazily.
    return x.gradient()
class TestInput(unittest.TestCase):
    """Checks dy.inputTensor / dy.sparse_inputTensor across ranks and batching."""
    def setUp(self):
        # 81 consecutive values, reshaped below into rank-1..4 tensors.
        self.input_vals = np.arange(81)
        self.squared_norm = (self.input_vals**2).sum()
        self.shapes = [(81,), (3, 27), (3, 3, 9), (3, 3, 3, 3)]
    def test_inputTensor_not_batched(self):
        # Unbatched input keeps the full shape and reports batch size 1.
        for i in range(4):
            dy.renew_cg()
            input_tensor = self.input_vals.reshape(self.shapes[i])
            x = dy.inputTensor(input_tensor)
            self.assertEqual(x.dim()[0], self.shapes[i],
                             msg="Dimension mismatch")
            self.assertEqual(x.dim()[1], 1,
                             msg="Dimension mismatch")
            self.assertTrue(
                np.allclose(x.npvalue(), input_tensor),
                msg="Expression value different from initial value"
            )
            self.assertEqual(
                dy.squared_norm(x).scalar_value(), self.squared_norm,
                msg="Value mismatch"
            )
    def test_sparse_inputTensor(self):
        # Two explicit entries in a batched sparse tensor; all others zero.
        dy.renew_cg()
        input_tensor = self.input_vals.reshape((3, 3, 3, 3))
        input_vals = [input_tensor[0, 0, 0, 0], input_tensor[0, 1, 2, 0]]
        input_indices = ([0, 0], [0, 1], [0, 2], [0, 0])
        x = dy.sparse_inputTensor(
            input_indices, input_vals, (3, 3, 3, 3), batched=True)
        self.assertEqual(x.dim()[0], (3, 3, 3),
                         msg="Dimension mismatch")
        self.assertEqual(x.dim()[1], 3,
                         msg="Dimension mismatch")
        self.assertTrue(np.allclose(x.npvalue()[0, 0, 0, 0], input_vals[0]),
                        msg="Expression value different from initial value")
        self.assertTrue(np.allclose(x.npvalue()[0, 1, 2, 0], input_vals[1]),
                        msg="Expression value different from initial value")
        self.assertTrue(np.allclose(x.npvalue()[1, 1, 1, 1], 0),
                        msg="Expression value different from initial value")
    def test_inputTensor_batched(self):
        # batched=True treats the LAST axis as the batch dimension.
        for i in range(4):
            dy.renew_cg()
            input_tensor = self.input_vals.reshape(self.shapes[i])
            xb = dy.inputTensor(input_tensor, batched=True)
            self.assertEqual(
                xb.dim()[0],
                (self.shapes[i][:-1] if i > 0 else (1,)),
                msg="Dimension mismatch"
            )
            self.assertEqual(
                xb.dim()[1],
                self.shapes[i][-1],
                msg="Dimension mismatch"
            )
            self.assertTrue(
                np.allclose(xb.npvalue(), input_tensor),
                msg="Expression value different from initial value"
            )
            self.assertEqual(
                dy.sum_batches(dy.squared_norm(xb)).scalar_value(),
                self.squared_norm,
                msg="Value mismatch"
            )
    def test_inputTensor_batched_list(self):
        # Same as above, but built from a Python list of per-batch arrays.
        for i in range(4):
            dy.renew_cg()
            input_tensor = self.input_vals.reshape(self.shapes[i])
            xb = dy.inputTensor([np.asarray(x).transpose()
                                 for x in input_tensor.transpose()])
            self.assertEqual(
                xb.dim()[0],
                (self.shapes[i][:-1] if i > 0 else (1,)),
                msg="Dimension mismatch"
            )
            self.assertEqual(
                xb.dim()[1],
                self.shapes[i][-1],
                msg="Dimension mismatch"
            )
            self.assertTrue(
                np.allclose(xb.npvalue(), input_tensor),
                msg="Expression value different from initial value"
            )
            self.assertEqual(
                dy.sum_batches(dy.squared_norm(xb)).scalar_value(),
                self.squared_norm,
                msg="Value mismatch"
            )
    def test_inputTensor_except(self):
        # Calling inputTensor with no tensor argument must raise TypeError.
        dy.renew_cg()
        self.assertRaises(TypeError, dy.inputTensor, batched=True)
class TestParameters(unittest.TestCase):
    """Tests for Parameters/LookupParameters: values, gradients, update flags,
    initializers, and SGD updates with a fixed learning rate of 0.1."""
    def setUp(self):
        # Create model
        self.m = dy.ParameterCollection()
        # Parameters -- all initialized to 1 so expected values are exact.
        self.p1 = self.m.add_parameters((10, 10), init=dy.ConstInitializer(1))
        self.p2 = self.m.add_parameters((10, 10), init=dy.ConstInitializer(1))
        self.lp1 = self.m.add_lookup_parameters(
            (10, 10), init=dy.ConstInitializer(1))
        self.lp2 = self.m.add_lookup_parameters(
            (10, 10), init=dy.ConstInitializer(1))
        # Trainer (gradient clipping disabled so updates are exact).
        self.trainer = dy.SimpleSGDTrainer(self.m, learning_rate=0.1)
        self.trainer.set_clip_threshold(-1)
    def test_list(self):
        [p1, p2] = self.m.parameters_list()
        [lp1, lp2] = self.m.lookup_parameters_list()
    def test_shape(self):
        shape = (10, 5, 2)
        lp = self.m.add_lookup_parameters(shape)
        lp_shape = lp.shape()
        self.assertEqual(shape[0], lp_shape[0])
        self.assertEqual(shape[1], lp_shape[1])
        self.assertEqual(shape[2], lp_shape[2])
    def test_as_array(self):
        # Smoke-test the value/gradient array accessors.
        # Values
        self.p1.as_array()
        self.lp1.as_array()
        self.lp1.row_as_array(0)
        self.lp1.rows_as_array([5, 6, 9])
        # Gradients
        self.p1.grad_as_array()
        self.lp1.as_array()
        self.lp1.row_grad_as_array(0)
        self.lp1.rows_grad_as_array([5, 6, 9])
    def test_grad(self):
        # d(x.p)/dp == x, so the parameter gradient must equal the input.
        # add parameter
        p = self.m.parameters_from_numpy(np.arange(5))
        # create cg
        dy.renew_cg()
        # input tensor
        x = dy.inputTensor(np.arange(5).reshape((1, 5)))
        # compute dot product
        res = x * p
        # Run forward and backward pass
        res.forward()
        res.backward()
        # Should print the value of x
        self.assertTrue(np.allclose(p.grad_as_array(),
                                    x.npvalue()), msg="Gradient is wrong")
    def test_set_value(self):
        # add parameter
        p = self.m.add_parameters((2, 3), init=dy.ConstInitializer(1))
        value_to_set = np.arange(6).reshape(2, 3)
        # set the value
        p.set_value(value_to_set)
        self.assertTrue(np.allclose(p.as_array(), value_to_set))
    def test_is_updated(self):
        # All parameters are updated by default.
        self.assertTrue(self.p1.is_updated())
        self.assertTrue(self.p2.is_updated())
        self.assertTrue(self.lp1.is_updated())
        self.assertTrue(self.lp2.is_updated())
    def test_set_updated(self):
        # Toggle update flags through several combinations, then verify that
        # frozen parameters are untouched by a trainer update.
        self.p2.set_updated(False)
        self.lp1.set_updated(False)
        self.assertTrue(self.p1.is_updated())
        self.assertFalse(self.p2.is_updated())
        self.assertFalse(self.lp1.is_updated())
        self.assertTrue(self.lp2.is_updated())
        self.p1.set_updated(True)
        self.p2.set_updated(False)
        self.lp1.set_updated(False)
        self.lp2.set_updated(True)
        self.assertTrue(self.p1.is_updated())
        self.assertFalse(self.p2.is_updated())
        self.assertFalse(self.lp1.is_updated())
        self.assertTrue(self.lp2.is_updated())
        self.p1.set_updated(False)
        self.p2.set_updated(True)
        self.lp1.set_updated(True)
        self.lp2.set_updated(False)
        self.assertFalse(self.p1.is_updated())
        self.assertTrue(self.p2.is_updated())
        self.assertTrue(self.lp1.is_updated())
        self.assertFalse(self.lp2.is_updated())
        dy.renew_cg()
        a = self.p1 * self.lp1[1]
        b = self.p2 * self.lp2[1]
        loss = dy.dot_product(a, b) / 100
        loss.backward()
        self.trainer.update()
        ones = np.ones((10, 10))
        # p1 and lp2 are frozen above, so they must keep their initial values.
        self.assertTrue(np.allclose(self.p1.as_array(), ones),
                        msg=np.array_str(self.p1.as_array()))
        self.assertTrue(np.allclose(self.lp2.as_array()[1], ones[
                        0]), msg=np.array_str(self.lp2.as_array()))
    def test_update(self):
        # Exact-arithmetic check of loss, gradients, and one SGD step.
        ones = np.ones((10, 10))
        dy.renew_cg()
        a = self.p1 * self.lp1[1]
        b = self.p2 * self.lp2[1]
        loss = dy.dot_product(a, b) / 100
        self.assertEqual(loss.scalar_value(), 10, msg=str(loss.scalar_value()))
        loss.backward()
        # Check the gradients
        self.assertTrue(np.allclose(self.p1.grad_as_array(), 0.1 * ones),
                        msg=np.array_str(self.p1.grad_as_array()))
        self.assertTrue(np.allclose(self.p2.grad_as_array(), 0.1 * ones),
                        msg=np.array_str(self.p2.grad_as_array()))
        self.assertTrue(np.allclose(self.lp1.grad_as_array()[1], ones[
                        0]), msg=np.array_str(self.lp1.grad_as_array()))
        self.assertTrue(np.allclose(self.lp2.grad_as_array()[1], ones[
                        0]), msg=np.array_str(self.lp2.grad_as_array()))
        self.trainer.update()
        # Check the updated parameters
        self.assertTrue(np.allclose(self.p1.as_array(), ones * 0.99),
                        msg=np.array_str(self.p1.as_array()))
        self.assertTrue(np.allclose(self.p2.as_array(), ones * 0.99),
                        msg=np.array_str(self.p2.as_array()))
        self.assertTrue(np.allclose(self.lp1.as_array()[1], ones[
                        0] * 0.9), msg=np.array_str(self.lp1.as_array()[1]))
        self.assertTrue(np.allclose(self.lp2.as_array()[1], ones[
                        0] * 0.9), msg=np.array_str(self.lp2.as_array()))
    def test_param_change_after_update(self):
        # Regression check: adding parameters between updates must not crash.
        for trainer_type in dy.SimpleSGDTrainer, dy.AdamTrainer:
            trainer = trainer_type(self.m)
            for _ in range(100):
                p = self.m.add_parameters((1,))
                dy.renew_cg()
                p.forward()
                p.backward()
                trainer.update()
    def test_delete_model(self):
        # Parameter must stay usable after its collection is garbage-collected.
        p = dy.ParameterCollection().add_parameters(
            (1,), init=dy.ConstInitializer(1)
        )
        p.value()
        gc.collect()
        p.value()
    def test_delete_parent_model(self):
        # Same as above, but through a subcollection.
        model = dy.ParameterCollection().add_subcollection()
        p = model.add_parameters(
            (1,), init=dy.ConstInitializer(1)
        )
        p.value()
        gc.collect()
        p.value()
    def test_parameters_initializers(self):
        # Smoke-test every supported initializer spelling.
        self.m.add_parameters((3, 5), init=0)
        self.m.add_parameters((3, 5), init='uniform', scale=2.0)
        self.m.add_parameters((3, 5), init='normal', mean=-1.0, std=2.5)
        self.m.add_parameters((5, 5), init='identity')
        # self.m.add_parameters((5,5), init='saxe')
        self.m.add_parameters((3, 5), init='glorot')
        self.m.add_parameters((3, 5), init='he')
        arr = np.zeros((3, 5))
        self.m.add_parameters(arr.shape, init=arr)
        self.m.add_parameters((3, 5), init=dy.ConstInitializer(2.0))
    def test_lookup_parameters_initializers(self):
        # Same for lookup parameters, plus a value round-trip via batch().
        p = self.m.add_lookup_parameters((3, 5), init=0)
        p = self.m.add_lookup_parameters((3, 5), init='uniform', scale=2.0)
        p = self.m.add_lookup_parameters(
            (3, 5), init='normal', mean=-1.0, std=2.5)
        p = self.m.add_lookup_parameters((3, 5), init='glorot')
        p = self.m.add_lookup_parameters((3, 5), init='he')
        arr = np.zeros((3, 5))
        p = self.m.add_lookup_parameters(arr.shape, init=arr)
        p = self.m.add_lookup_parameters((3, 5), init=dy.ConstInitializer(2.0))
        array = np.arange(50).reshape(10, 5)
        p = self.m.add_lookup_parameters(array.shape, init=array)
        slice_array = array[8]
        slice_param = p.batch([8]).npvalue()
        for i in range(5):
            self.assertEqual(slice_array[i], slice_param[i])
class TestBatchManipulation(unittest.TestCase):
    """Tests for lookup_batch / pick_batch_elem(s) / concatenate_to_batch."""
    def setUp(self):
        # create model
        self.m = dy.ParameterCollection()
        # Parameter
        self.p = self.m.add_lookup_parameters((2, 3))
        # Known values so batch manipulations can be checked exactly.
        self.pval = np.asarray([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
        self.p.init_from_array(self.pval)
    def test_lookup_batch(self):
        # Batched lookup of both rows yields the transposed value matrix.
        dy.renew_cg()
        x = dy.lookup_batch(self.p, [0, 1])
        self.assertTrue(np.allclose(x.npvalue(), self.pval.T))
    def test_pick_batch_elem(self):
        dy.renew_cg()
        x = dy.lookup_batch(self.p, [0, 1])
        y = dy.pick_batch_elem(x, 1)
        self.assertTrue(np.allclose(y.npvalue(), self.pval[1]))
    def test_pick_batch_elems(self):
        dy.renew_cg()
        x = dy.lookup_batch(self.p, [0, 1])
        y = dy.pick_batch_elems(x, [0])
        self.assertTrue(np.allclose(y.npvalue(), self.pval[0]))
        z = dy.pick_batch_elems(x, [0, 1])
        self.assertTrue(np.allclose(z.npvalue(), self.pval.T))
    def test_concatenate_to_batch(self):
        # Splitting a batch and re-concatenating must round-trip the values.
        dy.renew_cg()
        x = dy.lookup_batch(self.p, [0, 1])
        y = dy.pick_batch_elem(x, 0)
        z = dy.pick_batch_elem(x, 1)
        w = dy.concatenate_to_batch([y, z])
        self.assertTrue(np.allclose(w.npvalue(), self.pval.T))
class TestIOPartialWeightDecay(unittest.TestCase):
    """Save/load round-trip for a single parameter after an SGD update,
    checking that weight-decay state does not alter the restored value."""
    def setUp(self):
        self.file = "tmp.model"
        self.m = dy.ParameterCollection()
        self.m2 = dy.ParameterCollection()
        self.p = self.m.add_parameters(1)
        self.t = dy.SimpleSGDTrainer(self.m)
    def test_save_load(self):
        # One forward/backward/update cycle before saving.
        self.p.forward()
        self.p.backward()
        self.t.update()
        dy.renew_cg()
        v1 = self.p.value()
        dy.save(self.file, [self.p])
        [p2] = dy.load(self.file, self.m2)
        v2 = p2.value()
        self.assertTrue(np.allclose(v1, v2))
class TestIOEntireModel(unittest.TestCase):
    """Whole-collection save()/populate() round-trip, including gradients."""
    def setUp(self):
        self.file = "bilstm.model"
        self.m = dy.ParameterCollection()
        self.m2 = dy.ParameterCollection()
        self.b = dy.BiRNNBuilder(2, 10, 10, self.m, dy.LSTMBuilder)
        # Custom parameters
        self.W1 = self.m.add_parameters(10)
        self.W2 = self.m.add_parameters(12)
    def test_save_load(self):
        # populate() requires m2 to mirror m's structure exactly.
        self.m.save(self.file)
        dy.BiRNNBuilder(2, 10, 10, self.m2, dy.LSTMBuilder)
        self.m2.add_parameters(10)
        self.m2.add_parameters(12)
        self.m2.populate(self.file)
    def test_save_load_with_gradient(self):
        # Make it so W1 has a gradient
        dy.renew_cg()
        dy.sum_elems(self.W1).backward()
        # Record gradients
        W1_grad = self.W1.grad_as_array()
        W2_grad = self.W2.grad_as_array()
        # Save the ParameterCollection
        self.m.save(self.file)
        # Populate
        self.m.populate(self.file)
        # Check that the gradients were saved
        self.assertTrue(np.allclose(self.W1.grad_as_array(), W1_grad))
        self.assertTrue(np.allclose(self.W2.grad_as_array(), W2_grad))
class TestIOPartial(unittest.TestCase):
    """Per-parameter save()/populate() with explicit and default keys."""
    def setUp(self):
        self.file = "tmp.model"
        self.m = dy.ParameterCollection()
        self.m2 = dy.ParameterCollection()
        self.L = self.m.add_lookup_parameters((10, 2), name="la")
        self.a = self.m.add_parameters(10, name="a")
    def test_save_load(self):
        # Save under a custom key "/X", then append "a" under its own name.
        self.L.save(self.file, "/X")
        self.a.save(self.file, append=True)
        a = self.m2.add_parameters(10)
        L = self.m2.add_lookup_parameters((10, 2))
        L.populate(self.file, "/X")
        a.populate(self.file, "/a")
class TestIOHighLevelAPI(unittest.TestCase):
    """dy.save/dy.load with object lists, including the generator variants."""
    def setUp(self):
        self.file = "bilstm.model"
        # create models
        self.m = dy.ParameterCollection()
        self.m2 = dy.ParameterCollection()
        # Create birnn
        self.b = dy.BiRNNBuilder(2, 10, 10, self.m, dy.LSTMBuilder)
    def test_save_load(self):
        dy.save(self.file, [self.b])
        [b] = dy.load(self.file, self.m2)
    def test_save_load_generator(self):
        # Same round-trip, but saving from a generator and streaming the load.
        dy.save(self.file, (x for x in [self.b]))
        [b] = list(dy.load_generator(self.file, self.m2))
class TestExpression(unittest.TestCase):
    """Expression value/gradient access, and staleness errors after renew_cg."""
    def setUp(self):
        self.v1 = np.arange(10)
        self.v2 = np.arange(10)[::-1]
    def test_value(self):
        dy.renew_cg()
        x = dy.inputTensor(self.v1)
        self.assertTrue(np.allclose(x.npvalue(), self.v1))
    def test_value_sanity(self):
        # An expression from a previous graph must refuse to give a value.
        dy.renew_cg()
        x = dy.inputTensor(self.v1)
        dy.renew_cg()
        self.assertRaises(RuntimeError, npvalue_callable, x)
    def test_gradient(self):
        # d(x.y)/dx == y with full=True backward.
        dy.renew_cg()
        x = dy.inputTensor(self.v1)
        y = dy.inputTensor(self.v2)
        loss = dy.dot_product(x, y)
        loss.forward()
        loss.backward(full=True)
        self.assertTrue(np.allclose(x.gradient(), self.v2),
                        msg="{}\n{}\n{}\n{}\n{}".format(
                            loss.value(),
                            x.gradient(),
                            self.v2,
                            y.gradient(),
                            self.v2
                        ))
    def test_gradient_sanity(self):
        # Gradient access without a backward pass must raise.
        dy.renew_cg()
        x = dy.inputTensor(self.v1)
        y = dy.inputTensor(self.v2)
        loss = dy.dot_product(x, y)
        loss.forward()
        self.assertRaises(RuntimeError, gradient_callable, x)
class TestOperations(unittest.TestCase):
    """Numeric checks of individual operations (currently layer_norm)."""
    def setUp(self):
        # create model
        self.m = dy.ParameterCollection()
        self.v1 = np.arange(10)
        self.v2 = np.arange(10)
        self.v3 = np.arange(10)
    def test_layer_norm(self):
        # layer_norm(x, g, b) == g * (x - mean(x)) / std(x) + b
        dy.renew_cg()
        x = dy.inputTensor(self.v1)
        g = dy.inputTensor(self.v2)
        b = dy.inputTensor(self.v3)
        y = dy.layer_norm(x, g, b)
        loss = dy.sum_elems(y)
        loss.backward()
        centered_v1 = self.v1 - self.v1.mean()
        y_np_value = self.v2 / self.v1.std() * centered_v1 + self.v3
        self.assertTrue(np.allclose(y.npvalue(), y_np_value))
class TestSlicing(unittest.TestCase):
    """Expression __getitem__ slicing must match numpy slicing semantics."""
    def test_slicing(self):
        dy.renew_cg()
        data = np.random.random((10, 10, 10))
        self.assertTrue(np.allclose(dy.inputTensor(
            data)[:1, :2, :3].npvalue(), data[:1, :2, :3]))
        self.assertTrue(np.allclose(dy.inputTensor(data, batched=True)[
                        :1, :2, :3].npvalue(), data[:1, :2, :3]))
        self.assertTrue(np.allclose(dy.inputTensor(
            data)[:, :, :3].npvalue(), data[:, :, :3]))
        self.assertTrue(np.allclose(dy.inputTensor(
            data)[3:, :, :].npvalue(), data[3:, :, :]))
        self.assertTrue(np.allclose(dy.inputTensor(
            data)[:, :, ::1].npvalue(), data[:, :, ::1]))
        self.assertTrue(np.allclose(dy.inputTensor(
            data)[:, :, ::3].npvalue(), data[:, :, ::3]))
        self.assertTrue(np.allclose(dy.inputTensor(
            data)[3:5, 1:3, 1:].npvalue(), data[3:5, 1:3, 1:]))
class TestSimpleRNN(unittest.TestCase):
    """Parameter-access checks for SimpleRNNBuilder."""
    def setUp(self):
        # A two-layer simple RNN over 10-dim inputs and outputs.
        self.m = dy.ParameterCollection()
        self.rnn = dy.SimpleRNNBuilder(2, 10, 10, self.m)
    def test_get_parameters(self):
        # Expressions available after initial_state() must mirror raw params.
        dy.renew_cg()
        self.rnn.initial_state()
        params = self.rnn.get_parameters()
        exprs = self.rnn.get_parameter_expressions()
        for layer_params, layer_exprs in zip(params, exprs):
            for param, expr in zip(layer_params, layer_exprs):
                self.assertTrue(np.allclose(expr.npvalue(), param.as_array()))
    def test_get_parameters_sanity(self):
        # Without initial_state() in a fresh graph, expression access fails.
        self.assertRaises(
            ValueError, lambda b: b.get_parameter_expressions(), self.rnn)
class TestGRU(unittest.TestCase):
    """Parameter-access checks for GRUBuilder."""
    def setUp(self):
        # A two-layer GRU over 10-dim inputs and outputs.
        self.m = dy.ParameterCollection()
        self.rnn = dy.GRUBuilder(2, 10, 10, self.m)
    def test_get_parameters(self):
        # Expressions available after initial_state() must mirror raw params.
        dy.renew_cg()
        self.rnn.initial_state()
        params = self.rnn.get_parameters()
        exprs = self.rnn.get_parameter_expressions()
        for layer_params, layer_exprs in zip(params, exprs):
            for param, expr in zip(layer_params, layer_exprs):
                self.assertTrue(np.allclose(expr.npvalue(), param.as_array()))
    def test_get_parameters_sanity(self):
        # Without initial_state() in a fresh graph, expression access fails.
        self.assertRaises(
            ValueError, lambda b: b.get_parameter_expressions(), self.rnn)
class TestVanillaLSTM(unittest.TestCase):
    """Parameter access and state initialization for VanillaLSTMBuilder."""
    def setUp(self):
        # create model
        self.m = dy.ParameterCollection()
        self.rnn = dy.VanillaLSTMBuilder(2, 10, 10, self.m)
    def test_get_parameters(self):
        dy.renew_cg()
        self.rnn.initial_state()
        P_p = self.rnn.get_parameters()
        P_e = self.rnn.get_parameter_expressions()
        for l_p, l_e in zip(P_p, P_e):
            for w_p, w_e in zip(l_p, l_e):
                self.assertTrue(np.allclose(w_e.npvalue(), w_p.as_array()))
    def test_get_parameters_sanity(self):
        # Expression access before initial_state() must raise.
        self.assertRaises(
            ValueError, lambda x: x.get_parameter_expressions(), self.rnn)
    def test_initial_state_vec(self):
        # Two layers -> four state vectors (c and h per layer).
        dy.renew_cg()
        init_s = [dy.ones(10), dy.ones(10), dy.ones(10), dy.ones(10)]
        self.rnn.initial_state(init_s)
    def test_set_h(self):
        dy.renew_cg()
        init_h = [dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_h(init_h)
    def test_set_c(self):
        dy.renew_cg()
        init_c = [dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_s(init_c)
    def test_set_s(self):
        dy.renew_cg()
        init_s = [dy.ones(10), dy.ones(10), dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_s(init_s)
class TestCoupledLSTM(unittest.TestCase):
    """Parameter access and state initialization for CoupledLSTMBuilder."""
    def setUp(self):
        # create model
        self.m = dy.ParameterCollection()
        self.rnn = dy.CoupledLSTMBuilder(2, 10, 10, self.m)
    def test_get_parameters(self):
        dy.renew_cg()
        self.rnn.initial_state()
        P_p = self.rnn.get_parameters()
        P_e = self.rnn.get_parameter_expressions()
        for l_p, l_e in zip(P_p, P_e):
            for w_p, w_e in zip(l_p, l_e):
                self.assertTrue(np.allclose(w_e.npvalue(), w_p.as_array()))
    def test_get_parameters_sanity(self):
        # Expression access before initial_state() must raise.
        self.assertRaises(
            ValueError, lambda x: x.get_parameter_expressions(), self.rnn)
    def test_initial_state_vec(self):
        # Two layers -> four state vectors (c and h per layer).
        dy.renew_cg()
        init_s = [dy.ones(10), dy.ones(10), dy.ones(10), dy.ones(10)]
        self.rnn.initial_state(init_s)
    def test_set_h(self):
        dy.renew_cg()
        init_h = [dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_h(init_h)
    def test_set_c(self):
        dy.renew_cg()
        init_c = [dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_s(init_c)
    def test_set_s(self):
        dy.renew_cg()
        init_s = [dy.ones(10), dy.ones(10), dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_s(init_s)
class TestSparseLSTM(unittest.TestCase):
    """Parameter-access checks for SparseLSTMBuilder."""
    def setUp(self):
        # A two-layer sparse LSTM over 10-dim inputs and outputs.
        self.m = dy.ParameterCollection()
        self.rnn = dy.SparseLSTMBuilder(2, 10, 10, self.m)
    def test_get_parameters(self):
        # Expressions available after initial_state() must mirror raw params.
        dy.renew_cg()
        self.rnn.initial_state()
        params = self.rnn.get_parameters()
        exprs = self.rnn.get_parameter_expressions()
        for layer_params, layer_exprs in zip(params, exprs):
            for param, expr in zip(layer_params, layer_exprs):
                self.assertTrue(np.allclose(expr.npvalue(), param.as_array()))
    def test_get_parameters_sanity(self):
        # Without initial_state() in a fresh graph, expression access fails.
        self.assertRaises(ValueError, lambda b: b.get_parameter_expressions(), self.rnn)
class TestFastLSTM(unittest.TestCase):
    """Parameter access and state initialization for FastLSTMBuilder."""
    def setUp(self):
        # create model
        self.m = dy.ParameterCollection()
        self.rnn = dy.FastLSTMBuilder(2, 10, 10, self.m)
    def test_get_parameters(self):
        dy.renew_cg()
        self.rnn.initial_state()
        P_p = self.rnn.get_parameters()
        P_e = self.rnn.get_parameter_expressions()
        for l_p, l_e in zip(P_p, P_e):
            for w_p, w_e in zip(l_p, l_e):
                self.assertTrue(np.allclose(w_e.npvalue(), w_p.as_array()))
    def test_get_parameters_sanity(self):
        # Expression access before initial_state() must raise.
        self.assertRaises(
            ValueError, lambda x: x.get_parameter_expressions(), self.rnn)
    def test_initial_state_vec(self):
        # Two layers -> four state vectors (c and h per layer).
        dy.renew_cg()
        init_s = [dy.ones(10), dy.ones(10), dy.ones(10), dy.ones(10)]
        self.rnn.initial_state(init_s)
    def test_set_h(self):
        dy.renew_cg()
        init_h = [dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_h(init_h)
    def test_set_c(self):
        dy.renew_cg()
        init_c = [dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_s(init_c)
    def test_set_s(self):
        dy.renew_cg()
        init_s = [dy.ones(10), dy.ones(10), dy.ones(10), dy.ones(10)]
        state = self.rnn.initial_state()
        state.set_s(init_s)
class TestStandardSoftmax(unittest.TestCase):
    """Smoke test for StandardSoftmaxBuilder.neg_log_softmax with and
    without parameter updates."""
    def setUp(self):
        # create model
        self.pc = dy.ParameterCollection()
        self.sm = dy.StandardSoftmaxBuilder(3, 10, self.pc, True)
    def test_sanity(self):
        # Repeat across fresh graphs; alternate update=True/False targets.
        for i in range(3):
            dy.renew_cg()
            nll = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 4, update=True)
            nll_const = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 5, update=False)
            nll = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 6, update=True)
            nll_const = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 7, update=False)
            nll.value()
            nll_const.value()
class TestClassFactoredSoftmax(unittest.TestCase):
    """Smoke tests for ClassFactoredSoftmaxBuilder with a generated cluster file."""

    def setUp(self):
        self.pc = dy.ParameterCollection()
        # Write a cluster file mapping words 2i and 2i+1 to cluster i,
        # and build the word -> id dictionary in the same order.
        word2id = dict()
        with open('cluster_file.txt', 'w+') as f:
            for cluster in range(5):
                for word in (2 * cluster, 2 * cluster + 1):
                    f.write(str(cluster) + " " + str(word) + "\n")
                    word2id[str(word)] = len(word2id)
        self.sm = dy.ClassFactoredSoftmaxBuilder(
            3, 'cluster_file.txt', word2id, self.pc, True)

    def test_sanity(self):
        # Alternate update=True / update=False losses over several fresh graphs
        # and force evaluation of the last pair.
        for _ in range(3):
            dy.renew_cg()
            nll = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 4, update=True)
            nll_const = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 5, update=False)
            nll = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 6, update=True)
            nll_const = self.sm.neg_log_softmax(
                dy.inputTensor(np.arange(3)), 7, update=False)
            nll.value()
            nll_const.value()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 26,802 | 32.970849 | 88 | py |
dynet | dynet-master/bench/sequence_transduction.py | import dynet as dy
# NOTE(review): this benchmark is Python 2 code (uses xrange here and
# print statements below); it will not run under Python 3 as-is.
import random
import time
import sys
random.seed(1)
# Benchmark configuration (NCLASSS is a long-standing typo for NCLASSES).
SEQ_LENGTH=2
BATCH_SIZE=2
HIDDEN=1
NCLASSS=2
EMBED_SIZE=1
N_SEQS=1000
autobatching=True  # NOTE(review): assigned but never read anywhere in this script
dy.renew_cg()
# random_seq(ln, t): a list of `ln` random ints drawn uniformly from [0, t-1]
random_seq = lambda ln,t: [random.randint(0,t-1) for _ in xrange(ln)]
seq_lengths = [SEQ_LENGTH for _ in range(N_SEQS)]
#seq_lengths = [random.randint(10, SEQ_LENGTH) for _ in range(N_SEQS)]
# Random token sequences (vocab size 100) and matching random label sequences.
Xs = [random_seq(L, 100) for L in seq_lengths]
Ys = [random_seq(L, NCLASSS) for L in seq_lengths]
m = dy.Model()
trainer = dy.SimpleSGDTrainer(m)
E = m.add_lookup_parameters((1000, EMBED_SIZE))
fwR = dy.VanillaLSTMBuilder(1, EMBED_SIZE, HIDDEN, m)
bwR = dy.VanillaLSTMBuilder(1, EMBED_SIZE, HIDDEN, m)
T_= m.add_parameters((HIDDEN, HIDDEN*2))
fwR2 = dy.VanillaLSTMBuilder(1, EMBED_SIZE, HIDDEN, m)
bwR2 = dy.VanillaLSTMBuilder(1, EMBED_SIZE, HIDDEN, m)
W_= m.add_parameters((NCLASSS, HIDDEN*1))
total_time = 0.0
def transduce(seq, Y):
    """Run the forward LSTM over `seq` and return the summed per-token NLL w.r.t. `Y`."""
    embedded = [E[tok] for tok in seq]
    fw_states = fwR.initial_state().transduce(embedded)
    # this UNUSED part affects strategy 2
    XXX = fwR2.initial_state().transduce([E[3], E[5]])
    W = W_.expr()
    scores = [W * h for h in fw_states]
    losses = [dy.pickneglogsoftmax(score, gold) for score, gold in zip(scores, Y)]
    return dy.esum(losses)
# Accumulate per-sequence losses and time the batched forward pass.
batch=[]
start = time.time()
for X,Y in zip(Xs,Ys):
    loss = transduce(X,Y)
    batch.append(loss)
    if len(batch)==BATCH_SIZE:
        s = dy.esum(batch)
        s_ = time.time()
        s.forward()
        total_time = total_time + time.time() - s_
        print s.npvalue()
        # NOTE(review): sys.exit() makes everything below unreachable — the
        # backward pass and parameter update never run (debug leftover?).
        sys.exit()
        #break
        s.backward()
        trainer.update()
        batch = []
        dy.renew_cg()
print "total time:",time.time() - start, len(Xs) / (time.time() - start)
print "forward time:",total_time, len(Xs) / total_time
| 1,763 | 23.84507 | 72 | py |
dynet | dynet-master/doc/source/doc_util.py | from __future__ import print_function
import re
INDENT = 1
NAME = 2
INHERIT = 3
ARGUMENTS = 3
PASS=' pass\n'
def pythonize_arguments(arg_str):
    """
    Remove cython type declarations from a function argument list.

    :param arg_str: raw argument string (without parentheses) captured from a
        cython ``def``/``cpdef`` declaration, or None when the group is empty
    :return: comma separated argument names with types stripped but default
        values kept, e.g. ``"int x, float y=1.0"`` -> ``"x,y=1.0"``
    """
    out_args = []
    # If there aren't any arguments return the empty string
    if arg_str is None:
        return ''  # bug fix: previously returned the undefined name `out_str` (NameError)
    args = arg_str.split(',')
    for arg in args:
        components = arg.split('=')
        name_and_type = components[0].split(' ')
        # A trailing space before '=' leaves an empty last element after the
        # split, in which case the name sits one slot earlier.
        if name_and_type[-1] == '' and len(name_and_type) > 1:
            name = name_and_type[-2]
        else:
            name = name_and_type[-1]
        # Re-attach the default value, if there is one
        if len(components) > 1:
            name += '=' + components[1]
        out_args.append(name)
    return ','.join(out_args)
def get_indent(indent_str):
    """Return the captured indentation, or '' when the regex group did not match."""
    return '' if indent_str is None else indent_str
def get_inherit(inherit_str):
    """Return the captured parent-class suffix, or '' when the group did not match."""
    return inherit_str if inherit_str is not None else ''
def get_func_name(func_str):
    """Strip any leading return type and keep only the function's name."""
    return func_str.split(' ')[-1]
def create_doc_copy(in_file='../../python/_dynet.pyx', out_file='dynet.py'):
    """
    Write a pure-Python stub copy of a cython source, keeping only function /
    class declarations and their docstrings (function bodies become `pass`),
    so Sphinx autodoc can import the result as a plain module.

    :param in_file: path of the cython (.pyx) source to read
    :param out_file: path of the python stub file to write
    """
    in_comment = False  # currently inside a triple-quoted docstring
    in_func = False     # a function header was emitted but no body yet
    with open(out_file, 'w+') as py:
        with open(in_file, 'r') as pyx:
            for l in pyx:
                # Check if this line is a function declaration (def or cpdef)
                is_func = re.match(r'(\s*)(?:cp)?def (.*)\((.*)\):', l, re.I)
                if is_func:
                    # If the previous line was a function, print pass
                    if in_func:
                        print(indent + PASS, file=py)
                    # Preserve indentation
                    indent = get_indent(is_func.group(INDENT))
                    # Get function name
                    name = get_func_name(is_func.group(NAME))
                    # Get arguments
                    arguments = pythonize_arguments(is_func.group(ARGUMENTS))
                    # Print declaration
                    print(indent + "def "+name+"("+arguments+"):", file=py)
                    # Now in function body
                    in_func = True
                    continue
                # Check if this line declares a class
                is_class = re.match(r'(\s*)(?:cdef )?class (.*)(\(.*\))?:', l, re.I)
                if is_class:
                    # Preserve indentation
                    indent = get_indent(is_class.group(INDENT))
                    # Get parent class
                    inherit = get_inherit(is_class.group(INHERIT))
                    # Print declaration
                    print(indent + "class "+is_class.group(NAME)+inherit+":", file=py)
                # Handle comments (better)
                is_comment = re.match(r'(\s*)"""(.*)', l, re.I) or ('"""' in l and in_comment) # This last case is to account for end of line """ to end the comment
                # If start or beginning of comment
                if is_comment:
                    # If end of comment, print the """
                    if in_comment:
                        print(l[:-1], file=py)
                    # Toggle in_comment indicator
                    in_comment = not in_comment
                    # If this is a single line comment, end in_comment scope
                    if l.count('"""') > 1:
                        in_comment = False
                # Print comment line
                if in_comment:
                    print(l[:-1], file=py)
                    continue
                # If not in comment anymore but still in function scope, print pass
                if in_func:
                    print(indent + PASS, file=py)
                    in_func = False
| 3,948 | 33.043103 | 164 | py |
dynet | dynet-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# DyNet documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 13 16:13:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import subprocess
# Make this directory importable so the local doc_util helper can be loaded.
sys.path.insert(0, os.path.abspath('.'))
import doc_util
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../../examples/tutorials'))
# (Re-)create the symlink to the jupyter tutorial notebooks used by nbsphinx.
tutorials_folder = 'tutorials_notebooks'
if os.path.islink(tutorials_folder):
    os.remove(tutorials_folder)
os.symlink('../../examples/jupyter-tutorials', tutorials_folder)
# Create copy of _dynet.pyx for documentation purposes
doc_util.create_doc_copy(in_file = '../../python/_dynet.pyx',out_file = 'dynet.py')
# Run doxygen if on Readthedocs :
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
    subprocess.call('cd ../doxygen; doxygen', shell=True)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.mathjax',
    'breathe',
    'nbsphinx',
    'sphinx.ext.autodoc',
    'm2r',
    'sphinxcontrib.napoleon' # Yay Napoleon! Go France!
]
# Breathe bridges the doxygen XML output (for the C++ API) into Sphinx.
breathe_projects = {"dynet": "../doxygen/xml/"}
breathe_default_project = "dynet"
# Don't execute notebooks because it requires installing DyNet
nbsphinx_execute = 'never'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DyNet'
copyright = u'2016, Clab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/dynet_logo_white.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DyNetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'DyNet.tex', u'DyNet Documentation',
    u'Clab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dynet', u'DyNet Documentation',
    [u'Clab'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'DyNet', u'DyNet Documentation',
    u'Clab', 'DyNet', 'One line description of project.',
    'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,095 | 30.583333 | 83 | py |
pose_refinement | pose_refinement-master/src/training/loaders.py | import numpy as np
from torch.utils.data import DataLoader, SequentialSampler
from itertools import chain
import torch
from databases.datasets import pose_grid_from_index, Mpi3dTrainDataset, PersonStackedMucoTempDataset, ConcatPoseDataset
class ConcatSampler(torch.utils.data.Sampler):
    """Sampler that yields every index of the first sampler, then every index of the second."""

    def __init__(self, sampler1, sampler2):
        self.sampler1 = sampler1
        self.sampler2 = sampler2

    def __iter__(self):
        # Exhaust the first sampler before starting the second.
        for ind in self.sampler1:
            yield ind
        for ind in self.sampler2:
            yield ind

    def __len__(self):
        return len(self.sampler1) + len(self.sampler2)
class UnchunkedGenerator:
    """
    Loader that can be used with VideoPose3d model to load all frames of a video at once.
    Useful for testing/prediction.

    Iterates over sequences of ``dataset``; for each sequence yields
    ``(batch_2d, batch_valid)`` where ``batch_2d`` is the 2D pose sequence
    edge-padded by ``pad`` frames on both ends (with a flipped copy stacked
    in front when ``augment`` is on).
    """
    def __init__(self, dataset, pad, augment):
        # One iteration item per unique sequence id, in sorted order.
        self.seqs = sorted(np.unique(dataset.index.seq))
        self.dataset = dataset
        self.pad = pad          # temporal edge padding per side, in frames
        self.augment = augment  # also yield the horizontally flipped sequence
    def __iter__(self):
        for seq in self.seqs:
            inds = np.where(self.dataset.index.seq == seq)[0]
            batch = self.dataset.get_samples(inds, False)
            # Pad along time with edge values and add a leading batch axis.
            batch_2d = np.expand_dims(np.pad(batch['pose2d'], ((self.pad, self.pad), (0, 0)), 'edge'), axis=0)
            batch_3d = np.expand_dims(batch['pose3d'], axis=0)
            batch_valid = np.expand_dims(batch['valid_pose'], axis=0)
            if self.augment:
                # Second batch element is the flipped version of the same sequence.
                flipped_batch = self.dataset.get_samples(inds, True)
                flipped_batch_2d = np.expand_dims(np.pad(flipped_batch['pose2d'],
                                                         ((self.pad, self.pad), (0, 0)), 'edge'), axis=0)
                flipped_batch_3d = np.expand_dims(flipped_batch['pose3d'], axis=0)
                batch_2d = np.concatenate((batch_2d, flipped_batch_2d), axis=0)
                batch_3d = np.concatenate((batch_3d, flipped_batch_3d), axis=0)
                batch_valid = np.concatenate((batch_valid, batch_valid), axis=0)
            # yield {'pose2d': batch_2d, 'pose3d':batch_3d}
            yield batch_2d, batch_valid
class ChunkedGenerator:
    """
    Generator to be used with temporal model, during training.

    Yields batches where each item is a (2*pad+1)-frame window of 2D poses
    around a center frame, plus the center frame's 3D pose and validity flag.
    Windows are clamped at sequence boundaries (frames are repeated).
    """
    def __init__(self, dataset, batch_size, pad, augment, shuffle=True):
        """
        pad: 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
             it is usually (receptive_field-1)/2
        augment: turn on random horizontal flipping for training
        shuffle: randomly shuffle the dataset before each epoch
        """
        assert isinstance(dataset, (Mpi3dTrainDataset, PersonStackedMucoTempDataset, ConcatPoseDataset)), "Only works with Mpi datasets"
        self.dataset = dataset
        self.batch_size = batch_size
        self.pad = pad
        self.shuffle = shuffle
        self.augment = augment
        N = len(dataset.index)
        # For every pose, the dataset index of the first/last frame of its
        # sequence; used later to clamp temporal windows at the boundaries.
        frame_start = np.arange(N)-pose_grid_from_index(dataset.index.seq)[1] # index of the start of the frame
        frame_end = np.arange(N)-pose_grid_from_index(dataset.index.seq[::-1])[1]
        frame_end = N-frame_end[::-1]-1 # index of the end of the frame (last frame)
        self.frame_start = frame_start
        self.frame_end = frame_end
        # Sanity checks: bounds are ordered and stay inside the same sequence.
        assert np.all(frame_start<=frame_end)
        assert np.all(dataset.index.seq[frame_start] == dataset.index.seq[frame_end])
        assert np.all(dataset.index.seq[frame_start] == dataset.index.seq)
    def __len__(self):
        # Number of full batches per epoch (remainder is dropped).
        return len(self.dataset)//self.batch_size
    def __iter__(self):
        N = len(self.dataset)
        num_batch = N//self.batch_size
        indices = np.arange(N)
        if self.shuffle:
            np.random.shuffle(indices)
        # Each batch is assembled from SUB_BATCH pieces so the DataLoader
        # workers below can parallelize the sample loading.
        SUB_BATCH = 4
        assert self.batch_size % SUB_BATCH == 0, "SUB_BATCH must divide batch_size"
        class LoadingDataset:
            # Inner dataset uses `iself` to keep the enclosing generator's
            # `self` accessible inside the methods.
            def __len__(iself):
                return num_batch*SUB_BATCH
            def __getitem__(iself, ind):
                sub_batch_size = self.batch_size//SUB_BATCH
                batch_inds = indices[ind*sub_batch_size: (ind+1)*sub_batch_size]  # (nBatch,)
                batch_frame_start = self.frame_start[batch_inds][:, np.newaxis]
                batch_frame_end = self.frame_end[batch_inds][:, np.newaxis]
                # Decide per-sample horizontal flip; the same decision is
                # tiled over every frame of the temporal window.
                if self.augment:
                    flip = np.random.random(sub_batch_size) < 0.5
                else:
                    flip = np.zeros(sub_batch_size, dtype='bool')
                flip = np.tile(flip[:, np.newaxis], (1, 2*self.pad+1))
                # expand batch_inds such that it includes lower&upper bound indices for every element
                chunk_inds = batch_inds[:, np.newaxis] + np.arange(-self.pad, self.pad+1)[np.newaxis, :]
                chunk_inds = np.clip(chunk_inds, batch_frame_start, batch_frame_end)
                assert np.all(chunk_inds>=batch_frame_start)
                assert np.all(chunk_inds<=batch_frame_end)
                chunk = self.dataset.get_samples(chunk_inds.ravel(), flip.ravel())
                chunk_pose2d = chunk['pose2d'].reshape(chunk_inds.shape+chunk['pose2d'].shape[1:])
                chunk_pose3d = chunk['pose3d'].reshape(chunk_inds.shape+chunk['pose3d'].shape[1:])
                chunk_valid = chunk['valid_pose'].reshape(chunk_inds.shape+chunk['valid_pose'].shape[1:])
                # for non temporal values select the middle item:
                chunk_pose3d = chunk_pose3d[:, self.pad]
                chunk_valid = chunk_valid[:, self.pad]
                chunk_pose3d = np.expand_dims(chunk_pose3d, 1)
                return chunk_pose2d, chunk_pose3d, chunk_valid
        wrapper_dataset = LoadingDataset()
        loader = DataLoader(wrapper_dataset, sampler=SequentialSampler(wrapper_dataset),
                            batch_size=SUB_BATCH, num_workers=4)
        for chunk_pose2d, chunk_pose3d, chunk_valid in loader:
            # Merge the SUB_BATCH dimension back into one flat batch.
            chunk_pose2d = chunk_pose2d.reshape((-1,)+chunk_pose2d.shape[2:])
            chunk_pose3d = chunk_pose3d.reshape((-1,)+chunk_pose3d.shape[2:])
            chunk_valid = chunk_valid.reshape(-1)
            yield {'temporal_pose2d': chunk_pose2d, 'pose3d': chunk_pose3d, 'valid_pose': chunk_valid}
| 6,259 | 41.297297 | 136 | py |
pose_refinement | pose_refinement-master/src/training/callbacks.py | import math
import numpy as np
import torch
from training.loaders import UnchunkedGenerator
from training.torch_tools import eval_results
from util.pose import remove_root, mrpe, optimal_scaling, r_mpjpe
class BaseCallback(object):
    """Training callback interface; subclasses override only the hooks they need."""

    def on_itergroup_end(self, iter_cnt, epoch_loss):
        """Called after a group of iterations; default is a no-op."""
        pass

    def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
        """Called at the end of every epoch; default is a no-op."""
        pass
def _sample_value(dictionary):
""" Selects a value from a dictionary, it is always the same element. """
return list(dictionary.values())[0]
class BaseMPJPECalculator(BaseCallback):
    """
    Base class for calculating and displaying MPJPE stats, grouped by something (sequence most of the time).

    Subclasses implement ``pred_and_calc_loss``; this class turns the returned
    per-sequence predictions into MPJPE/PCK/MRPE statistics and optionally
    appends them to a CSV file at the end of each epoch.
    """
    # Threshold for the PCK metric; presumably in mm like the poses — TODO confirm against eval_results.
    PCK_THRESHOLD = 150
    def __init__(self, data_3d_mm, joint_set, post_process3d=None, csv=None, prefix='val'):
        """
        :param data_3d_mm: dict, group_name-> ndarray(n.Poses, nJoints, 3). The ground truth poses in mm.
        :param joint_set: joint set describing the pose layout (NUM_JOINTS, NAMES, index_of)
        :param post_process3d: optional function mapping raw network output to mm-scale poses
        :param csv: optional path; when given, per-sequence/per-joint stats are appended each epoch
        :param prefix: string prefix for the logged loss names (e.g. 'val', 'test')
        """
        self.csv = csv
        self.prefix = prefix
        self.pctiles = [5, 10, 50, 90, 95, 99]
        if self.csv is not None:
            # Write the CSV header once, up front.
            with open(csv, 'w') as f:
                f.write('epoch,type,name,avg')
                f.write(''.join([',pct' + str(x) for x in self.pctiles]))
                f.write('\n')
        self.data_3d_mm = data_3d_mm
        # Poses with all NUM_JOINTS joints are treated as absolute (hip/root
        # included); otherwise the root was removed and one joint is missing.
        self.is_absolute = _sample_value(self.data_3d_mm).shape[1] == joint_set.NUM_JOINTS
        self.num_joints = joint_set.NUM_JOINTS if self.is_absolute else joint_set.NUM_JOINTS - 1
        self.joint_set = joint_set
        self.post_process3d = post_process3d
        self.sequences = sorted(list(data_3d_mm.keys()))
    def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
        # Evaluate the current model and optionally append stats to the CSV.
        sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles = self.eval(model, verbose=True)
        if self.csv is not None:
            joint_names = self.joint_set.NAMES.copy()
            if not self.is_absolute:
                joint_names = np.delete(joint_names, self.joint_set.index_of('hip'))  # remove root
            with open(self.csv, 'a') as f:
                # One row per sequence, then one row per joint.
                for seq in self.sequences:
                    f.write('%d,%s,%s,%f' % (epoch, 'sequence', seq, sequence_mpjpes[seq]))
                    for i in range(len(self.pctiles)):
                        f.write(',%f' % sequence_pctiles[seq][i])
                    f.write('\n')
                for joint_id in range(self.num_joints):
                    f.write('%d,%s,%s,%f' % (epoch, 'joint', joint_names[joint_id], joint_means[joint_id]))
                    for i in range(len(self.pctiles)):
                        f.write(',%f' % joint_pctiles[i, joint_id])
                    f.write('\n')
    def eval(self, model=None, calculate_scale_free=False, verbose=False):
        """
        :param model: the evaluator can use this model, if self.model is nor provided
        :param calculate_scale_free: if True, also calculates N-MRPE and N_RMPJPE
        :return: (sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles)
        """
        losses, preds = self.pred_and_calc_loss(model)
        losses = np.concatenate([losses[seq] for seq in self.sequences])
        self.val_loss = np.nanmean(losses)
        self.losses_to_log = {self.prefix + '_loss': self.val_loss}
        self.losses = losses
        self.preds = preds
        # Assuming hip is the last component
        if self.is_absolute:
            self.losses_to_log[self.prefix + '_abs_loss'] = np.nanmean(losses[:, -3:])
            self.losses_to_log[self.prefix + '_rel_loss'] = np.nanmean(losses[:, :-3])
        else:
            self.losses_to_log[self.prefix + '_rel_loss'] = self.val_loss
        assert self.pctiles[-1] == 99, "Currently the last percentile is hardcoded to be 99 for printing"
        sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles = \
            eval_results(preds, self.data_3d_mm, self.joint_set, pctiles=self.pctiles, verbose=verbose)
        self.losses_to_log[self.prefix + '_mrpe'] = np.mean([mrpe(preds[s], self.data_3d_mm[s], self.joint_set)
                                                             for s in preds])
        # Calculate relative error
        if self.is_absolute:
            # Remove the root joint from both prediction and GT, then re-run MPJPE.
            rel_pred = {}
            rel_gt = {}
            for seq in preds:
                rel_pred[seq] = remove_root(preds[seq], self.joint_set.index_of('hip'))
                rel_gt[seq] = remove_root(self.data_3d_mm[seq], self.joint_set.index_of('hip'))
            rel_mean_error, _, _, _, _ = eval_results(rel_pred, rel_gt, self.joint_set, verbose=False)
            rel_mean_error = np.mean(np.asarray(list(rel_mean_error.values()), dtype=np.float32))
            if verbose:
                print("Root relative error (MPJPE): %.2f" % rel_mean_error)
            self.rel_mean_error = rel_mean_error
            self.losses_to_log[self.prefix + '_rel_error'] = rel_mean_error
        self.mean_sequence_mpjpe = np.mean(np.asarray(list(sequence_mpjpes.values()), dtype=np.float32))
        self.mean_sequence_pck = np.mean(np.asarray(list(sequence_pcks.values()), dtype=np.float32))
        self.losses_to_log[self.prefix + '_err'] = self.mean_sequence_mpjpe
        self.losses_to_log[self.prefix + '_pck'] = self.mean_sequence_pck
        if calculate_scale_free:
            # Scale-invariant metrics: find one optimal scale per video, then
            # recompute MRPE and root-relative MPJPE on the scaled predictions.
            scaled_preds = {}
            for seq in preds:
                # predict a single scale for the full video
                pred_points = preds[seq].reshape(1, -1, 3)
                gt_points = self.data_3d_mm[seq].reshape(1, -1, 3)
                s = optimal_scaling(pred_points, gt_points)
                scaled_preds[seq] = preds[seq] * s
            n_mrpe = np.mean([mrpe(scaled_preds[s], self.data_3d_mm[s], self.joint_set) for s in scaled_preds])
            n_rmpjpe = np.mean([r_mpjpe(scaled_preds[s], self.data_3d_mm[s], self.joint_set) for s in scaled_preds])
            if verbose:
                print('N-MRPE: %.1f' % n_mrpe)
                print('N-MPJPE: %.1f' % n_rmpjpe)
            self.losses_to_log[self.prefix + '_n_mrpe'] = n_mrpe
            self.losses_to_log[self.prefix + '_n_rel_err'] = n_rmpjpe
        return sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles
    def results_and_gt(self):
        """
        Returns the gt and result matrices as list of (seq, pred, gt) tuples
        """
        keys = sorted(list(self.data_3d_mm.keys()))
        return [(seq, self.preds[seq], self.data_3d_mm[seq]) for seq in keys]
    def pred_and_calc_loss(self, model):
        """
        Subclasses must implement this method. It calculates the loss
        and the predictions of the current model.
        :param model: model received in the on_epoch_end callback
        :return: (loss, pred) pair, each is a dictionary from sequence name to loss or prediction
        """
        raise NotImplementedError()
class TemporalTestEvaluator(BaseMPJPECalculator):
    """
    MPJPE evaluator for a temporal model: runs the model over whole sequences
    of the test dataset (e.g. MPII-3DHP) via an UnchunkedGenerator, optionally
    averaging predictions with a horizontally flipped copy.
    """
    def __init__(self, model, dataset, loss, augment, post_process3d=None, prefix='test'):
        """
        :param model: temporal model exposing receptive_field()
        :param dataset: dataset with index.seq, get_samples(), poses3d and pose3d_jointset
        :param loss: 'l1', 'l1_nan' (both elementwise abs) or 'l2' (elementwise square)
        :param augment: if True, also evaluate the flipped input and average the two
        """
        self.model = model
        self.dataset = dataset
        self.augment = augment
        # Pad so every frame has a full receptive field of temporal context.
        pad = (model.receptive_field() - 1) // 2
        self.generator = UnchunkedGenerator(dataset, pad, self.augment)
        self.seqs = sorted(np.unique(dataset.index.seq))
        data_3d_mm = {}
        self.preprocessed3d = {}
        for seq in self.seqs:
            inds = np.where(dataset.index.seq == seq)[0]
            batch = dataset.get_samples(inds, False)
            # Network-space targets (for the loss) vs mm-space GT (for metrics).
            self.preprocessed3d[seq] = batch['pose3d'][batch['valid_pose']]
            data_3d_mm[seq] = dataset.poses3d[inds][batch['valid_pose']]
        # NOTE(review): an unrecognized `loss` value leaves self.loss unset
        # and fails only later inside pred_and_calc_loss.
        if loss == 'l1' or loss == 'l1_nan':
            self.loss = lambda p, t: np.abs(p - t)
        elif loss == 'l2':
            self.loss = lambda p, t: np.square(p - t)
        super().__init__(data_3d_mm, dataset.pose3d_jointset, post_process3d=post_process3d, csv=None, prefix=prefix)
    def pred_and_calc_loss(self, model):
        """
        Runs self.model over every sequence and calculates the element-wise loss.
        :param model: model received in the on_epoch_end callback (unused here, self.model is used)
        :return: (loss, pred) pair, each is a dictionary from sequence name to loss or prediction
        """
        preds = {}
        self.raw_preds = {}
        losses = {}
        with torch.no_grad():
            for i, (pose2d, valid) in enumerate(self.generator):
                seq = self.seqs[i]
                pred3d = self.model(torch.from_numpy(pose2d).cuda()).detach().cpu().numpy()
                self.raw_preds[seq] = pred3d.copy()
                valid = valid[0]
                # Loss is computed on the unflipped prediction only.
                losses[seq] = self.loss(pred3d[0][valid], self.preprocessed3d[seq])
                pred_real_pose = self.post_process3d(pred3d[0], seq)  # unnormalized output
                if self.augment:
                    # Un-flip the augmented prediction and average it with the original.
                    pred_real_pose_aug = self.post_process3d(pred3d[1], seq)
                    pred_real_pose_aug[:, :, 0] *= -1
                    pred_real_pose_aug = self.dataset.pose3d_jointset.flip(pred_real_pose_aug)
                    pred_real_pose = (pred_real_pose + pred_real_pose_aug) / 2
                preds[seq] = pred_real_pose[valid]
        return losses, preds
class TemporalMupotsEvaluator(TemporalTestEvaluator):
    """ Can be used with PersonStackedMupots dataset for a temporal model. """
    def __init__(self, model, dataset, loss, augment, post_process3d=None, prefix='test'):
        super().__init__(model, dataset, loss, augment, post_process3d=post_process3d, prefix=prefix)
        # Regroup the per-person ground truth into per-sequence arrays.
        self.data_3d_mm = TemporalMupotsEvaluator._group_by_seq(self.data_3d_mm)
        self.sequences = sorted(self.data_3d_mm.keys())
    @staticmethod
    def _group_by_seq(data):
        # Keys are expected in '<seq>/<person>' form; for each of the 20
        # sequences, the per-person arrays are concatenated in sorted key order.
        per_person_keys = sorted(data.keys())
        result = {}
        for seq in range(1, 21):
            keys = sorted([k for k in per_person_keys if k.startswith('%d/' % seq)])
            assert len(keys) > 0, per_person_keys
            result[seq] = np.concatenate([data[k] for k in keys])
        return result
    def pred_and_calc_loss(self, model):
        # Predict per person, then merge losses/predictions per sequence.
        losses, preds = super().pred_and_calc_loss(model)
        losses = TemporalMupotsEvaluator._group_by_seq(losses)
        preds = TemporalMupotsEvaluator._group_by_seq(preds)
        return losses, preds
class ModelCopyTemporalEvaluator(TemporalTestEvaluator):
    """
    Same as TemporalTestEvaluator but uses another model for evaluation than for training,
    and before evaluation copies the weights to the 'eval' model
    """
    def pred_and_calc_loss(self, train_model):
        """ train_model is coming from the training loop """
        # Copy the current training weights into the held evaluation model,
        # switch it to eval mode, then run the inherited prediction logic
        # (which reads self.model, so the argument can be None).
        self.model.load_state_dict(train_model.state_dict())
        self.model.eval()
        return super().pred_and_calc_loss(None)
def preds_from_logger(dataset, logger):
    """
    Arranges results from LogAllMillimeterCallback according to index in dataset

    :param dataset: dataset whose poses3d/index/good_poses define the layout
    :param logger: evaluator holding per-sequence predictions in `preds`
    :return: ndarray aligned with dataset.poses3d, filled from logger.preds
    """
    # Special handling for multipose inputs
    if dataset.poses3d.ndim == 4:
        pose_shape = list(logger.data_3d_mm.values())[0].shape
        result = np.zeros((dataset.poses3d.shape[:2]) + pose_shape[1:])
        result[:] = np.nan  # slots not covered by good_poses stay NaN
        seqs = np.unique(dataset.index.seq)
        for seq in seqs:
            inds = dataset.index.seq == seq
            mask = np.zeros(result.shape[:2], dtype='bool')
            assert np.all(~mask)
            mask[inds] = dataset.good_poses[inds] # composing masks
            result[mask] = logger.preds[seq]
        return result
    elif dataset.poses3d.ndim == 3:
        pose_shape = list(logger.data_3d_mm.values())[0].shape
        result = np.zeros((len(dataset.index),) + pose_shape[1:])
        # NOTE(review): unlike the 4D branch above, slots not covered by
        # good_poses remain 0 here rather than NaN — confirm this is intentional.
        seqs = np.unique(dataset.index.seq)
        for seq in seqs:
            inds = dataset.index.seq == seq
            mask = np.zeros(len(result), dtype='bool')
            mask[inds] = dataset.good_poses[inds] # composing masks
            result[mask] = logger.preds[seq]
        return result
    else:
        raise Exception("unexpected shape")
class ModelSaver(BaseCallback):
    """
    Saves the model's state dict at the end of every epoch.
    A '%d' in the path is substituted with the epoch number.
    """
    def __init__(self, path):
        self.path = path

    def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
        target = self.path % epoch if '%d' in self.path else self.path
        torch.save(model.state_dict(), target)
class BestModelSaver(BaseCallback):
    """
    Saves the model whenever the tracked validation metric improves.
    Useful together with early stopping.
    """
    def __init__(self, path, evaluator, metric, lower_better=True):
        assert lower_better, "lower_better=False not implemented yet"
        self.path = path
        self.evaluator = evaluator
        self.metric = metric
        self.best_value = math.inf

    def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
        target = self.path
        if '%d' in target:
            target = target % epoch
        # Save only on strict improvement of the tracked metric.
        current = self.evaluator.losses_to_log[self.metric]
        if current < self.best_value:
            self.best_value = current
            torch.save(model.state_dict(), target)
| 13,217 | 38.57485 | 117 | py |
pose_refinement | pose_refinement-master/src/training/torch_tools.py | import numpy as np
from torch.utils.data import DataLoader, TensorDataset
from itertools import zip_longest, chain
import torch
from util.misc import assert_shape
from inspect import signature
import time
from torch import optim
from util.pose import mrpe
def exp_decay(params):
    """Return a schedule f(epoch) = params.learning_rate * 0.96 ** (0.243 * epoch)."""
    def schedule(epoch):
        # learning_rate is read at call time, so later changes to params apply.
        return params.learning_rate * (0.96 ** (epoch * 0.243))
    return schedule
def dataset2numpy(dataset, fields, num_workers=8):
    """
    Converts a PyTorch Dataset to numpy arrays.

    Parameters:
        dataset: the dataset to materialize.
        fields: list of fields (keys or indices into a batch) to return from the full dataset.
        num_workers: number of DataLoader worker processes; default 8 keeps the
            original behaviour, pass 0 to load in-process.

    Returns:
        A list with one concatenated numpy array per requested field.
    """
    # max(1, ...) guards against a zero batch size for datasets with fewer
    # than 8 elements — DataLoader rejects batch_size=0.
    batch_size = max(1, len(dataset) // 8)
    loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
    parts = list(loader)
    return [np.concatenate([p[f].numpy() for p in parts], axis=0) for f in fields]
def torch_predict(model, input, batch_size=None, device='cuda'):
    """
    Runs a model over all data in ``input`` and collects the outputs as numpy arrays.

    :param model: PyTorch Model(nn.Module)
    :param input: a numpy array, a torch tensor or a PyTorch dataloader
    :param batch_size: if input was a numpy array/tensor, this is the batch size used for evaluation
    :param device: device the model runs on; batches are moved here before the forward pass
    :return: a numpy array of predictions, or a list of numpy arrays if the model
             returns multiple outputs
    """
    model.eval()
    if isinstance(input, np.ndarray):
        data_loader = DataLoader(TensorDataset(torch.from_numpy(input).to(device)), batch_size)
        needs_move = False  # already moved to the device above
    elif isinstance(input, torch.Tensor):
        data_loader = DataLoader(TensorDataset(input.to(device)), batch_size)
        needs_move = False
    else:
        # assumed to be a DataLoader yielding lists/tuples, dicts or plain tensors
        data_loader = input
        needs_move = True
    result = []
    with torch.no_grad():
        for batch in data_loader:
            if needs_move:
                # move every tensor of the batch to the device, whatever the batch layout
                if isinstance(batch, (list, tuple, map)):
                    batch = map(lambda x: x.to(device), batch)
                elif isinstance(batch, dict):
                    batch = {k: v.to(device) for k, v in batch.items()}
                else:
                    batch = batch.to(device)
            # call the model according to the batch layout
            if isinstance(batch, (list, tuple, map)):
                pred = model(*batch)
            elif isinstance(batch, dict):
                pred = model(**batch)
            else:
                pred = model(batch)
            # multi-output models produce a list per batch, single-output a tensor
            if isinstance(pred, (list, tuple, map)):
                result.append([x.cpu().numpy() for x in pred])
            else:
                result.append(pred.cpu().numpy())
            del pred  # free GPU memory before the next batch
    # concatenate per-batch results; per-output when the model had multiple outputs
    if isinstance(result[0], list):
        out = []
        for i in range(len(result[0])):
            out.append(np.concatenate([x[i] for x in result]))
        result = out
    else:
        result = np.concatenate(result)
    return result
def torch_eval(model, loader, loss_fn, input_name, target_name, device='cuda'):
    """
    Evaluates a PyTorch model.
    :param model: PyTorch Model(nn.Module)
    :param loader: a PyTorch DataLoader producing input batches (dicts of tensors)
    :param loss_fn: a function or dictionary of functions. The metrics evaluated. The functions
     should return a single scalar torch tensor. They can have 3 parameters, the third is optional.
     The first is the output of the model, the second is the target variable, and the third is the full
     batch used in the eval iteration.
     It is expected that the output losses are averaged over the batch.
    :param input_name: name of input fields passed to the model (a single name or array of names)
    :param target_name: name of the target field inside a batch
    :param device: device the batches are moved to before the forward pass
    :return: the averaged metric if ``loss_fn`` was a single function, otherwise a
             dict mapping metric name to its average over the dataset
    """
    assert isinstance(loader, DataLoader)
    model.eval()
    loss_was_func = False
    if not isinstance(loss_fn, dict):
        # wrap a single function so the rest of the code handles both cases uniformly
        loss_fn = {'loss': loss_fn}
        loss_was_func = True
    if not isinstance(input_name, (list, tuple)):
        input_name = [input_name]
    metrics = {}
    num_args = {}
    for name, func in loss_fn.items():
        metrics[name] = 0
        # the metric's arity decides whether it also receives the full batch
        num_args[name] = len(signature(func).parameters)
    total_cnt = 0
    with torch.no_grad():
        for batch in loader:
            # batch = list(map(lambda x: x.to(device), batch))
            batch = {k: v.to(device) for k, v in batch.items()}
            pred = model(*[batch[x] for x in input_name])
            for name, loss_func in loss_fn.items():
                if num_args[name] == 2:
                    loss = loss_func(pred, batch[target_name])
                else:
                    loss = loss_func(pred, batch[target_name], batch)
                # weight the batch-averaged loss by the batch size
                metrics[name] += loss.item() * len(batch[input_name[0]])
            total_cnt += len(batch[input_name[0]])
    for name in loss_fn.keys():
        metrics[name] /= total_cnt
    if loss_was_func:
        return metrics['loss']
    else:
        return metrics
def get_optimizer(parameters, config):
    """Build the optimizer selected by ``config['optimiser']``.

    Supported values: ``adam``, ``rmsprop``, ``sgd``, ``radam``.
    Raises an ``Exception`` for any other value.
    """
    name = config['optimiser']
    # lambdas keep construction lazy, so only the selected branch is evaluated
    builders = {
        'adam': lambda: optim.Adam(parameters, lr=config['learning_rate'], amsgrad=config['adam_amsgrad']),
        'rmsprop': lambda: optim.RMSprop(parameters, lr=config['learning_rate']),
        'sgd': lambda: optim.SGD(parameters, lr=config['learning_rate'], momentum=config['sgd_momentum']),
        # NOTE(review): RAdam is not imported in this module — confirm it is
        # available at call time before selecting 'radam'
        'radam': lambda: RAdam(parameters, lr=config['learning_rate']),
    }
    if name not in builders:
        raise Exception('Unimplemented optimiser: ' + name)
    return builders[name]()
def _get_scheduler(optimizer, config):
    """Decodes a scheduler config. Returns None if no scheduler was specified."""
    if config is None or config['type'] == 'none':
        return None
    kind = config['type']
    if kind == 'martinez_weight_decay':
        # plain exponential decay
        return optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 0.96 ** (epoch * 0.243))
    if kind == 'multiplicative':
        return optim.lr_scheduler.StepLR(optimizer, step_size=config['step_size'], gamma=config['multiplier'])
    if kind == 'lr_div_10_wd':
        # exponential decay combined with an extra scale factor from epoch 40 and 80 on
        def lr_fn(epoch):
            scale = config['lr_div_10_scale']
            base = 0.96 ** (epoch * 0.243)
            if epoch >= 80:
                factor = scale * scale
            elif epoch >= 40:
                factor = scale
            else:
                factor = 1
            return factor * base

        return optim.lr_scheduler.LambdaLR(optimizer, lr_fn)
    raise NotImplementedError("Unknown scheduler type: ", kind)
def torch_train(train_loader, model, update_fn, _config, callbacks=[]):
    """
    Trains a model.
    :param train_loader: training data is loaded from here, PyTorch DataLoader
    :param model: PyTorch model to train
    :param update_fn: the function called on every iteration; returns
                      ``(loss, dict_of_extra_values)``
    :param _config: Sacred config object
    :param callbacks: optional callbacks for training
    :return:
    """
    # NOTE(review): mutable default `callbacks=[]` is shared between calls;
    # it is only read here, but confirm no caller mutates it.
    optimizer = get_optimizer(model.parameters(), _config)
    scheduler = _get_scheduler(optimizer, _config['lr_scheduler'])
    if not isinstance(callbacks, list):
        callbacks = [callbacks]
    epoch_len = _config['num_epochs']
    iter_cnt = 0
    for epoch in range(epoch_len):  # loop over the dataset multiple times
        model.train()
        epoch_loss = 0
        epoch_val = {}
        epoch_start = time.time()
        iter_start = time.time()
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            # zero the parameter gradients
            optimizer.zero_grad()
            batch_start = time.time()
            loss, vals = update_fn(model, data)
            loss.backward()
            optimizer.step()
            batch_time = time.time() - batch_start
            # print statistics
            running_loss += loss.item()
            epoch_loss += loss.item()
            for k, v in vals.items():
                epoch_val[k] = epoch_val.get(k, 0) + v
            del loss  # free up memory
            if (i + 1) % 50 == 0:  # print every 50 mini-batches
                iter_time = (time.time() - iter_start) / 50
                print('\r[%d, %5d] loss: %.3f b=%4dms i=%dms' % (epoch + 1, i + 1, running_loss / 50,
                                                                 int(batch_time * 1000), int(iter_time * 1000)), end='')
                for c in callbacks:
                    c.on_itergroup_end(iter_cnt, running_loss / 50)
                running_loss = 0.0
                iter_start = time.time()
            iter_cnt += 1
            if _config.get('SHORT_EPOCH', False):
                # debugging aid: cut the epoch short
                if i > 600:
                    break
        print("Iterations done:", i)
        if scheduler is not None:
            scheduler.step()
        epoch_time = time.time() - epoch_start
        epoch_loss = epoch_loss / len(train_loader)
        epoch_val = {k: v / len(train_loader) for k, v in epoch_val.items()}
        print()
        print("Epoch %3d: loss: %4.3f %4.1fs" % (epoch + 1, epoch_loss, epoch_time))
        # evaluate
        model.eval()
        for c in callbacks:
            # fixed: previously passed `epoch_vals`, an undefined name that
            # raised NameError whenever callbacks were supplied
            c.on_epoch_end(model, epoch, epoch_loss, optimizer, epoch_val)
def set_requires_grad(module, requires_grad):
    """Enable or disable gradient computation for every parameter of ``module``."""
    for parameter in module.parameters():
        parameter.requires_grad = requires_grad
def eval_results(pred3d, gt3d, joint_set, verbose=True, pck_threshold=150, pctiles=[99]):
    """
    Evaluates the results by printing various statistics. Also returns those results.
    Poses can be represented either in hipless 16 joints or 17 joints with hip format.
    Order is MuPo-TS order in all cases.
    Parameters:
        pred3d: dictionary of predictions in mm, seqname -> (nSample, [16|17], 3)
        gt3d: dictionary of ground truth in mm, seqname -> (nSample, [16|17], 3)
        joint_set: JointSet instance describing the order of joints
        verbose: if True, a table of the results is printed
        pck_threshold: joint errors below this (mm) count as correct for PCK
        pctiles: list of percentiles of the errors to calculate
            (mutable default is only read here, never mutated)
    Returns:
        sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles
    """
    has_hip = list(pred3d.values())[0].shape[1] == joint_set.NUM_JOINTS  # whether it contains the hip or not
    sequence_mpjpes = {}
    sequence_pcks = {}
    sequence_pctiles = {}
    all_errs = []
    for k in sorted(pred3d.keys()):
        pred = pred3d[k]
        gt = gt3d[k]
        assert pred.shape == gt.shape, "Pred shape:%s, gt shape:%s" % (pred.shape, gt.shape)
        assert (not has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS - 1, 3)) or \
               (has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS, 3)), \
            "Unexpected shape:" + str(pred.shape)
        # per-joint Euclidean error
        errs = np.linalg.norm(pred - gt, axis=2, ord=2)  # (nSample, nJoints)
        sequence_pctiles[k] = np.nanpercentile(errs, pctiles)
        sequence_pcks[k] = np.nanmean((errs < pck_threshold).astype(np.float64))
        sequence_mpjpes[k] = np.nanmean(errs)
        # Adjusting results for missing hip: the hip has zero error by
        # construction in hip-relative poses, so it is counted as a perfect joint
        if not has_hip:
            N = float(joint_set.NUM_JOINTS)
            sequence_pcks[k] = sequence_pcks[k] * ((N - 1) / N) + 1. / N
            sequence_mpjpes[k] = sequence_mpjpes[k] * ((N - 1) / N)
        all_errs.append(errs)
    all_errs = np.concatenate(all_errs)  # errors per joint, (nPoses, nJoints)
    joint_mpjpes = np.nanmean(all_errs, axis=0)
    joint_pctiles = np.nanpercentile(all_errs, pctiles, axis=0)
    num_joints = joint_set.NUM_JOINTS if has_hip else joint_set.NUM_JOINTS - 1
    assert_shape(all_errs, (None, num_joints))
    assert_shape(joint_mpjpes, (num_joints,))
    assert_shape(joint_pctiles, (len(pctiles), num_joints))
    if verbose:
        joint_names = joint_set.NAMES.copy()
        if not has_hip:
            joint_names = np.delete(joint_names, joint_set.index_of('hip'))  # remove root
        # Index of the percentile that will be printed. If 99 is calculated it is selected,
        # otherwise the last one
        pctile_ind = len(pctiles) - 1
        if 99 in pctiles:
            pctile_ind = pctiles.index(99)
        print("----- Per sequence and joint errors in millimeter on the validation set ----- ")
        print("%s %6s %5s %6s \t %22s %6s %6s" % ('Sequence', 'Avg', 'PCK', str(pctiles[pctile_ind]) + '%', '',
                                                  'Avg', str(pctiles[pctile_ind]) + '%'))
        # print sequence rows and joint rows side by side; zip_longest pads the
        # shorter of the two lists with None
        for seq, joint_id in zip_longest(sorted(pred3d.keys()), range(num_joints)):
            if seq is not None:
                seq_str = "%-8s: %6.2f mm %4.1f%% %6.2f mm\t " \
                          % (str(seq), sequence_mpjpes[seq], sequence_pcks[seq] * 100, sequence_pctiles[seq][pctile_ind])
            else:
                seq_str = " " * 49
            if joint_id is not None:
                print('%s%15s (#%2d): %6.2f mm %6.2f mm ' % (seq_str, joint_names[joint_id], joint_id,
                                                             joint_mpjpes[joint_id], joint_pctiles[pctile_ind, joint_id]))
            else:
                print(seq_str)
        mean_sequence_err = np.mean(np.asarray(list(sequence_mpjpes.values()), dtype=np.float32))
        print("\nMean sequence error (Absolute MPJPE) is %6.2f mm" % mean_sequence_err)
        print("---------------------------------------------------------------- ")
        print("MRPE: %.1f" % np.mean([mrpe(pred3d[k], gt3d[k], joint_set) for k in gt3d.keys()]))
    return sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_mpjpes, joint_pctiles
| 13,551 | 35.926431 | 136 | py |
pose_refinement | pose_refinement-master/src/training/__init__.py | 0 | 0 | 0 | py | |
pose_refinement | pose_refinement-master/src/training/preprocess.py | import numpy as np
import torch
from databases.datasets import PoseDataset
from databases.joint_sets import Common14Joints, CocoExJoints, MuPoTSJoints
from util.misc import assert_shape, load
from util.pose import remove_root, remove_root_keepscore, combine_pose_and_trans
def preprocess_2d(data, fx, cx, fy, cy, joint_set, root_name):
    """
    2D data preprocessing, performing the following:
    1. Keeps only COMMON14 joints
    2. Normalizes coordinates by multiplying with the inverse of the calibration matrix
    3. Converts numbers in a root-relative form
    4. Invisible joints are replaced by a single value
    5. Convert data into float
    :param data: (nPoses, 25, 3[x, y, scores]) - OpenPose detected coordinates
    :param fx: ndarray(nPoses) or float, horizontal focal length
    :param cx: ndarray(nPoses) or float, horizontal principal point
    :param fy: ndarray(nPoses) or float, vertical focal length
    :param cy: ndarray(nPoses) or float, horizontal principal point
    :param joint_set: the JointSet object describing the order of joints
    :param root_name: name of the root joint, must be a COMMON14 joint
    :return: ndarray(nPoses, 42), First 39 numbers are the non-root joints, last one is the root
    """
    # (inline timing comments below are profiling notes for the individual steps)
    # return data# rest is 60ms
    assert_shape(data, ("*", None, joint_set.NUM_JOINTS, 3))
    assert not isinstance(fx, np.ndarray) or len(fx) == len(data)
    assert not isinstance(fy, np.ndarray) or len(fy) == len(data)
    # negligible
    if isinstance(fx, np.ndarray):
        # reshape per-pose intrinsics so they broadcast against data
        N = len(data)
        shape = [1] * (data.ndim - 1)
        shape[0] = N
        fx = fx.reshape(shape)
        fy = fy.reshape(shape)
        cx = cx.reshape(shape)
        cy = cy.reshape(shape)
    data = data[..., joint_set.TO_COMMON14, :]
    # This is 100ms
    # apply the inverse calibration: (p - c) / f, done in place
    data[..., :, 0] -= cx
    data[..., :, 1] -= cy
    data[..., :, 0] /= fx
    data[..., :, 1] /= fy
    root_ind = np.where(Common14Joints.NAMES == root_name)[0][0]
    root2d = data[..., root_ind, :].copy()  # negligible
    # 70ms
    data = remove_root_keepscore(data, root_ind)  # (nPoses, 13, 3), modifies data
    # print(data.dtype)
    # negligible
    bad_frames = data[..., 2] < 0.1  # joints with OpenPose score below 0.1 are treated as invisible
    # replace joints having low scores with 1700/focus
    # this is to prevent leaking cx/cy
    # this is 140ms
    if isinstance(fx, np.ndarray):
        fx = np.tile(fx, (1,) + data.shape[1:-1])
        fy = np.tile(fy, (1,) + data.shape[1:-1])
        data[bad_frames, 0] = -1700 / fx[bad_frames]
        data[bad_frames, 1] = -1700 / fy[bad_frames]
    else:
        data[bad_frames, 0] = -1700 / fx
        data[bad_frames, 1] = -1700 / fy
    # print(data.dtype)
    # stack root next to the pose
    data = data.reshape(data.shape[:-2] + (-1,))  # (nPoses, 13*3)
    # negligible/70ms
    data = np.concatenate([data, root2d], axis=-1)  # (nPoses, 14*3)
    return data
def preprocess_3d(data, add_root, log_root_z, joint_set, root_name):
    """
    3D preprocessing:
    1. Makes the pose root-relative (the root joint is removed)
    2. If add_root is True, the absolute root coordinates are appended at the
       end of the flattened pose; the z coordinate of the root may be
       log-transformed.
    3. Flattens the joints into the last axis.
    :param data: ndarray(nFrames, [nPoses], nJoints, 3[x, y, z]) 3D coordinates in MuPoTS order
    :param add_root: True if the absolute coordinates of the hip should be included in the output
    :param log_root_z: if true, the log of the z coordinate of the root is used
    :param joint_set: JointSet describing the joint order
    :param root_name: name of the root joint, must be a MuPoTS joint
    :return: ndarray(nPoses, 3*nJoints|3*(nJoints-1)), 3*nJoints if add_root is true otherwise 3*(nJoints-1)
    """
    assert_shape(data, ("*", joint_set.NUM_JOINTS, 3))
    root_ind = joint_set.index_of(root_name)
    root = data[..., root_ind, :].copy()
    if log_root_z:
        root[..., 2] = np.log(root[..., 2])
    relative = remove_root(data, root_ind)  # (nFrames, [nPoses], nJoints-1, 3)
    flat = relative.reshape(relative.shape[:-2] + (-1,))  # (nFrames, [nPoses], (nJoints-1)*3)
    if add_root:
        flat = np.concatenate([flat, root], axis=-1)  # (nFrames, [nPoses], nJoints*3)
    return flat.astype('float32')
class RemoveIndex(object):
    """
    Drops the bookkeeping 'index' field from a sample so it can be batched.
    """

    def __call__(self, sample):
        sample.pop('index', None)
        return sample

    @staticmethod
    def from_state(state, dataset):
        # stateless transform: nothing to restore
        return RemoveIndex()
class ToTensor(object):
    """ Converts every value of a sample dict to a PyTorch tensor. """

    def __call__(self, sample):
        converted = {}
        for key, value in sample.items():
            if isinstance(value, np.ndarray):
                converted[key] = torch.from_numpy(value)
            else:
                converted[key] = torch.tensor(value)
        return converted
class Identity(object):
    """
    A no-op preprocessor: returns every sample unchanged.

    Exposes mean/std and the from_file/from_state/state_dict interface of the
    other normalizers so it can be used interchangeably with them.
    """

    def __init__(self, dataset=None):
        self.mean = 0
        self.std = 1

    @staticmethod
    def from_file(path):
        return Identity()

    @staticmethod
    def from_state(state, dataset=None):
        # `dataset` is accepted (and ignored) so callers that invoke
        # from_state(state, dataset) on every transform class — like
        # SaveableCompose.from_state — also work with Identity.
        return Identity()

    def state_dict(self):
        return {}

    def __call__(self, sample):
        return sample
class BaseNormalizer(object):
    """
    Base class for preprocessors that standardize a single field of a sample.

    Subclasses must set ``field_name`` themselves, outside this constructor,
    and their constructor must accept a single ``None`` argument, which skips
    preloading the parameters.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    @classmethod
    def from_file(cls, path):
        # path points to a pkl file holding the mean/std state
        return cls.from_state(load(path))

    @classmethod
    def from_state(cls, state):
        """
        Restores a normalizer from a state dict containing mean and std.
        """
        obj = cls(None)
        obj.mean = state['mean']
        obj.std = state['std']
        return obj

    def state_dict(self):
        return {'mean': self.mean, 'std': self.std, 'field_name': self.field_name}

    def __call__(self, sample):
        field = self.field_name
        sample[field] = (sample[field] - self.mean) / self.std
        return sample
class MeanNormalize2D(BaseNormalizer):
    """
    Standardizes the 'pose2d' field with per-coordinate mean and std.
    """

    def __init__(self, dataset):
        """
        Parameters:
            dataset: either a numpy array containing the 2D poses or a dataset
                object exposing ``poses2d``; None leaves mean/std to be set later
        """
        self.field_name = 'pose2d'
        if dataset is None:
            # mean and std must be set manually later (e.g. by from_state)
            return
        if not isinstance(dataset, np.ndarray):
            dataset = dataset.poses2d
        assert isinstance(dataset, np.ndarray), "Expected dataset to be either a PanopticSinglePersonDataset or a numpy array, got:" + str(
            type(dataset))
        super().__init__(np.nanmean(dataset, axis=0), np.nanstd(dataset, axis=0))
class MeanNormalize3D(BaseNormalizer):
    """
    Standardizes the 'pose3d' field with per-coordinate mean and std.
    """

    def __init__(self, dataset):
        """
        Parameters:
            dataset: either a numpy array containing the 3D poses or a
                PoseDataset; None leaves mean/std to be set later
        """
        self.field_name = 'pose3d'
        if dataset is None:
            # mean and std must be set manually later (e.g. by from_state)
            return
        if isinstance(dataset, PoseDataset):
            dataset = dataset.poses3d
        assert isinstance(dataset, np.ndarray), "Expected dataset to be either a PanopticSinglePersonDataset or a numpy array"
        super().__init__(np.nanmean(dataset, axis=0), np.nanstd(dataset, axis=0))
class SplitToRelativeAbsAndMeanNormalize3D(object):
    """
    Splits the 3D poses into relative+absolute and then normalizes it. It uses the same
    preprocessing mechanics as the Depthpose paper did.
    """

    def __init__(self, dataset, normalizer=None, cache=False, log_root_z=True):
        """
        :param dataset: The full dataset, required if no normalizer is provided or ``cache`` is True.
        :param normalizer: the Normalizer object to be applied on the preprocessed data. If None,
                           the normalizer parameters are calculated from the dataset.
        :param cache: If True, preprocessed values are saved and not calculated every time during training.
                      Potentially speeds up training.
        :param log_root_z: if True, the z coordinate of the root is log-transformed
        """
        if cache or normalizer is None:
            assert dataset is not None, "dataset must be defined if cache==true or no normalizer provided"
        self.cache = cache
        self.log_root_z = log_root_z
        if dataset is not None:
            self.joint_set = dataset.pose3d_jointset
            preprocessed3d = preprocess_3d(dataset.poses3d, True, log_root_z, self.joint_set, 'hip')
            if normalizer is None:
                normalizer = MeanNormalize3D(preprocessed3d)
            if cache:
                # normalize up front, so __call__ becomes a simple lookup
                self.preprocessed3d = (preprocessed3d - normalizer.mean) / normalizer.std
        assert isinstance(normalizer, MeanNormalize3D), \
            "Unexpected normalizer type: " + str(type(normalizer))
        self.normalizer = normalizer

    @classmethod
    def from_file(cls, path, dataset):
        state = load(path)
        return cls.from_state(state, dataset)

    @classmethod
    def from_state(cls, state, dataset):
        """
        Restores the transform from a state dict containing mean, std and the
        joint set name.
        """
        instance = cls(dataset, MeanNormalize3D.from_state(state), cache=False)
        if dataset is None:
            # without a dataset the joint set must be reconstructed from its saved name
            set_name = state['joint_set']
            if "<class '" in set_name:  # fixing incorrectly formatted type name
                set_name = set_name[set_name.rindex('.') + 1:-2]
            instance.joint_set = globals()[set_name]()
        return instance

    def state_dict(self):
        state = self.normalizer.state_dict()
        state['joint_set'] = type(self.joint_set).__name__
        return state

    def __call__(self, sample):
        # Note: this algorithm makes iterating over all examples 9s slower, seems acceptable
        # pose3d = sample['pose3d'] # shape is (, nJoints*3)
        # preprocessed = preprocess_3d(pose3d.reshape((self.num_joints, 3)), True, PanopticJoints(), 'hip')
        if self.cache:
            # cached values were already normalized in __init__
            preprocessed = self.preprocessed3d[sample['index']]
            sample['pose3d'] = preprocessed
        else:
            pose3d = sample['pose3d']  # shape is ([nPoses],nJoints, 3)
            preprocessed = preprocess_3d(pose3d, True, self.log_root_z, self.joint_set, 'hip')
            sample['pose3d'] = preprocessed
            sample = self.normalizer(sample)
        return sample
class DepthposeNormalize2D(object):
    """
    Normalizes the 2D pose using the technique in Depthpose.
    """

    def __init__(self, dataset, normalizer=None, cache=False):
        """
        :param dataset: The full dataset, required if no normalizer is provided or ``cache`` is True.
        :param normalizer: the Normalizer object to be applied on the preprocessed data. If None,
                           the normalizer parameters are calculated from the dataset.
        :param cache: If True, preprocessed values are saved and not calculated every time during training.
                      Potentially speeds up training.
        """
        if cache or normalizer is None:
            assert dataset is not None, "dataset must be defined if cache==true or no normalizer provided"
        self.cache = cache
        if dataset is not None:
            preprocessed2d = preprocess_2d(dataset.poses2d.copy(), dataset.fx, dataset.cx, dataset.fy, dataset.cy,
                                           dataset.pose2d_jointset, 'hip')
            if normalizer is None:
                normalizer = MeanNormalize2D(preprocessed2d)
            if cache:
                # normalize up front, so __call__ becomes a simple lookup
                self.preprocessed2d = (preprocessed2d - normalizer.mean) / normalizer.std
        self.normalizer = normalizer
        self.dataset = dataset
        assert isinstance(self.normalizer, MeanNormalize2D), \
            "Unexpected normalizer type: " + str(type(normalizer))

    @classmethod
    def from_file(cls, path, dataset):
        state = load(path)
        return cls.from_state(state, dataset)

    @classmethod
    def from_state(cls, state, dataset):
        instance = cls(dataset, MeanNormalize2D.from_state(state), cache=False)
        return instance

    def state_dict(self):
        return self.normalizer.state_dict()

    def __call__(self, sample):
        if self.cache:
            # cached values were already normalized in __init__
            sample['pose2d'] = self.preprocessed2d[sample['index']]
        else:
            pose2d = sample['pose2d']  # shape is ([nPoses],nJoints, 3)
            single_item = sample['pose2d'].ndim == 2
            if single_item:
                # preprocess_2d expects a batch dimension
                pose2d = np.expand_dims(pose2d, axis=0)
            ind = sample['index']
            # NOTE(review): cx is taken from the sample while fx/fy/cy come
            # from the dataset — confirm this asymmetry is intended
            preprocessed = preprocess_2d(pose2d.copy(), self.dataset.fx[ind], sample['cx'],
                                         self.dataset.fy[ind], self.dataset.cy[ind],
                                         self.dataset.pose2d_jointset, 'hip')
            if single_item:
                preprocessed = preprocessed[0]
            sample['pose2d'] = preprocessed
            sample = self.normalizer(sample)
        return sample
class SaveableCompose(object):
    """
    A pipeline of transforms applied in order, whose configuration can be
    saved with ``state_dict`` and restored with ``from_state``/``from_file``.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    @staticmethod
    def from_file(path, dataset, locals):
        state = load(path)
        return SaveableCompose.from_state(state, dataset, locals)

    @staticmethod
    def from_state(state, dataset, locals):
        """
        Rebuilds the pipeline from a saved state (a list of
        {'name': ..., 'state': ...} dicts, as produced by state_dict).
        """
        transforms = []
        for d in state:
            if d['name'] == 'function':
                # plain functions are restored by looking up their name in this module
                t = globals()[d['state']['name']]
            elif d['name'] == 'FuncAndNormalizeWrapper':
                # SECURITY: eval() executes arbitrary code from the state file;
                # only load state files from trusted sources.
                # NOTE(review): FuncAndNormalize is not defined in this module —
                # confirm it is imported where this branch is exercised.
                func = eval(d['state']['func_def'], globals(), locals)
                t = FuncAndNormalize.from_state(func, d['state'], dataset)
            else:
                t = globals()[d['name']].from_state(d['state'], dataset)
            transforms.append(t)
        return SaveableCompose(transforms)

    def state_dict(self):
        state = []
        for t in self.transforms:
            name = type(t).__name__
            if name == 'function':
                s = {'name': t.__name__}
            else:
                s = t.state_dict() if hasattr(t, 'state_dict') else None
            state.append({'name': name, 'state': s})
        return state

    def __call__(self, sample):
        for t in self.transforms:
            sample = t(sample)
        return sample
def log_keep_hrnet_c14(data):
    """Take the elementwise log, then keep only the COMMON-14 hrnet joints."""
    logged = np.log(data)
    return keep_hrnet_c14(logged)
def keep_hrnet_c14(data):
    """
    Keeps only COMMON-14 joints from hrnet.
    data - ndarray(..., 19): each slice along the last dimension corresponds
    to a joint in CocoEx joint order.
    """
    assert_shape(data, ('*', None, CocoExJoints.NUM_JOINTS))
    return data[..., CocoExJoints.TO_COMMON14]
def zero_and_log_hrnet_c14(data):
    """Log-transform, keep the COMMON-14 hrnet joints and replace NaNs with 2."""
    result = keep_hrnet_c14(np.log(data))
    result[np.isnan(result)] = 2
    return result
def zero_and_hrnet_c14(data):
    """Keep the COMMON-14 hrnet joints and replace NaNs with 0."""
    result = keep_hrnet_c14(data)
    result[np.isnan(result)] = 0
    return result
def decode_trfrm(transform_name, locals=None):
    """
    Resolves a transformation name to the object it refers to.

    Parameters:
        transform_name: name of a preprocessor class or function defined in
            this module (or supplied via ``locals``).
        locals: optional dict of extra names defined at the call site; these
            take precedence over this module's globals. (The parameter name
            shadows the builtin but is kept for interface compatibility.)
    """
    lookup = dict(globals())
    if locals is not None:
        lookup.update(locals)
    return lookup[transform_name]
def get_postprocessor(config, test_set, normalizer3d):
    """
    Returns a function that converts normalized network output back to
    absolute 3D poses. Only the 'SplitToRelativeAbsAndMeanNormalize3D'
    preprocessing has an unconverter; anything else raises NotImplementedError.
    """
    if config['preprocess_3d'] != 'SplitToRelativeAbsAndMeanNormalize3D':
        raise NotImplementedError('No unconverter for 3D preprocessing: ' + config['preprocess_3d'])

    def postprocess(x, seq):
        # MuPoTS joint sets get scale 1, everything else 1000 — presumably a
        # meters-to-millimeters conversion; confirm against the datasets
        scale = 1 if isinstance(test_set.pose3d_jointset, MuPoTSJoints) else 1000
        return scale * combine_pose_and_trans(x, normalizer3d.std, normalizer3d.mean, test_set.pose3d_jointset, "hip")

    return postprocess
| 16,694 | 33.853862 | 139 | py |
pose_refinement | pose_refinement-master/src/util/mx_tools.py | import numpy as np
def project_points(calib, points3d):
    """
    Projects 3D points onto the image plane with a pinhole calibration matrix
    (no distortion).

    Parameters:
        calib: (3, 3) intrinsic matrix
        points3d: ndarray of shape (nPoints, 3)

    Returns:
        ndarray of shape (nPoints, 2) with the projected pixel coordinates
    """
    assert points3d.ndim == 2 and points3d.shape[1] == 3
    projected = np.empty((len(points3d), 2))
    depth = points3d[:, 2]
    projected[:, 0] = points3d[:, 0] / depth * calib[0, 0] + calib[0, 2]
    projected[:, 1] = points3d[:, 1] / depth * calib[1, 1] + calib[1, 2]
    return projected
def calibration_matrix(points2d, points3d):
    """
    Estimates a camera calibration matrix (no distortion) from 3D points and
    their 2D projections via least squares. Only works if all points are in
    front of the camera (all z coordinates > 0).

    Returns:
        calib, mean reprojection error, x residuals, y residuals, x singular values, y singular values
    """
    assert points2d.ndim == 2 and points2d.shape[1] == 2
    assert points3d.ndim == 2 and points3d.shape[1] == 3
    # Fit u = fx * (x/z) + cx and v = fy * (y/z) + cy independently
    design_x = np.column_stack([points3d[:, 0] / points3d[:, 2], np.ones(len(points3d))])
    px, resx, _, sx = np.linalg.lstsq(design_x, points2d[:, 0], rcond=None)
    design_y = np.column_stack([points3d[:, 1] / points3d[:, 2], np.ones(len(points3d))])
    py, resy, _, sy = np.linalg.lstsq(design_y, points2d[:, 1], rcond=None)
    calib = np.eye(3)
    calib[0, 0], calib[0, 2] = px
    calib[1, 1], calib[1, 2] = py
    # Mean absolute reprojection error of the fitted matrix
    reproj = np.mean(np.abs(points2d - project_points(calib, points3d)))
    return calib, reproj, resx, resy, sx, sy
| 1,694 | 32.235294 | 97 | py |
pose_refinement | pose_refinement-master/src/util/misc.py | import json
import os
import pickle
import numpy as np
import scipy.io
def ensuredir(path):
    """
    Creates a folder (including parents) if it doesn't exist.

    :param path: path to the folder to create; the empty string is a no-op
    """
    if len(path) == 0:
        return
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    os.makedirs(path, exist_ok=True)
def load(path, pkl_py2_comp=False):
    """
    Loads the content of a file, dispatching on its extension. Mainly a
    convenience to avoid writing ``open()`` contexts everywhere.
    Supported types:
      - .pkl: pickles
      - .npy: numpy arrays
      - .txt: list of lines with trailing newline characters stripped
      - .mat: Matlab files (scipy.io)
      - .json: json documents
    :param path: path to the file
    :param pkl_py2_comp: if True, pickles are loaded with latin1 encoding for
        Python 2 compatibility
    """
    if path.endswith('.pkl'):
        with open(path, 'rb') as f:
            return pickle.load(f, encoding='latin1') if pkl_py2_comp else pickle.load(f)
    if path.endswith('.npy'):
        return np.load(path)
    if path.endswith('.txt'):
        with open(path, 'r') as f:
            return [line.rstrip('\n\r') for line in f]
    if path.endswith('.mat'):
        return scipy.io.loadmat(path)
    if path.endswith('.json'):
        with open(path, 'r') as f:
            return json.load(f)
    raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def save(path, var, varname=None):
    """
    Saves ``var`` to ``path``; the format is chosen by the file extension.
    Parent folders are created as needed.
    Supported types:
      - .pkl: pickle (protocol 2)
      - .npy: numpy
      - .mat: matlab, requires the ``varname`` keyword argument
      - .json: json (indented, sorted keys)
      - .txt: a string written verbatim, or any iterable written line by line
    """
    ensuredir(os.path.dirname(path))
    if path.endswith(".pkl"):
        with open(path, 'wb') as f:
            pickle.dump(var, f, 2)
        return
    if path.endswith(".mat"):
        assert varname is not None, "when using matlab format the variable name must be defined"
        scipy.io.savemat(path, {varname: var})
        return
    if path.endswith(".npy"):
        np.save(path, var)
        return
    if path.endswith('.json'):
        with open(path, 'w') as f:
            json.dump(var, f, indent=2, sort_keys=True)
        return
    if path.endswith(".txt"):
        with open(path, 'w') as f:
            if isinstance(var, str):
                f.write(var)
            else:
                for line in var:
                    f.write(line)
                    f.write('\n')
        return
    raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def assert_shape(data, shape):
    """
    Asserts that a numpy array matches a shape pattern. Pattern entries:
      - an integer: the dimension must have exactly this size
      - None: the dimension may have any size
      - '*': matches any number of dimensions (like '...'); at most one allowed
    :param data: a numpy array
    :param shape: a tuple or list describing the pattern
    """
    star_pos = len(shape)
    for pos, entry in enumerate(shape):
        if entry == "*":
            if star_pos < len(shape):
                raise Exception("Only one asterisk (*) character allowed")
            star_pos = pos
    min_ndim = len(shape) if star_pos == len(shape) else len(shape) - 1
    assert len(data.shape) >= min_ndim, "Unexpected shape: " + str(data.shape)
    # dimensions before the asterisk are matched from the front...
    for pos in range(star_pos):
        if shape[pos] is not None:
            assert data.shape[pos] == shape[pos], "Unexpected shape: " + str(data.shape)
    # ...and dimensions after it from the back, via negative indices
    for pos in range(star_pos + 1, len(shape)):
        offset = pos - len(shape)
        if shape[offset] is not None:
            assert data.shape[offset] == shape[offset], "Unexpected shape: " + str(data.shape)
| 3,651 | 31.035088 | 128 | py |
pose_refinement | pose_refinement-master/src/util/pose.py | import numpy as np
from databases.joint_sets import CocoExJoints
from util.misc import assert_shape
def harmonic_mean(a, b, eps=1e-6):
    """Harmonic mean of a and b; eps keeps the division safe near zero."""
    return 2 / (1 / (a + eps) + 1 / (b + eps))
def _combine(data, target, a, b):
    """
    Modifies data by combining (taking average) joints at index a and b at position target.

    The x/y coordinates get the arithmetic mean of the two joints; the score
    channel (index 2) gets their harmonic mean. ``data`` is changed in place.
    """
    data[:, target, :2] = (data[:, a, :2] + data[:, b, :2]) / 2
    data[:, target, 2] = harmonic_mean(data[:, a, 2], data[:, b, 2])
def extend_hrnet_raw(raw):
    """
    Extends a 17-joint Coco skeleton with hip and neck joints, computed by
    averaging left/right hips and shoulders respectively. The new joints'
    scores are the harmonic means of the two source scores.
    """
    assert_shape(raw, (None, 17, 3))
    joints = CocoExJoints()
    extended = np.zeros((len(raw), 19, 3), dtype='float32')
    extended[:, :17, :] = raw
    _combine(extended, joints.index_of('hip'), joints.index_of('left_hip'), joints.index_of('right_hip'))
    _combine(extended, joints.index_of('neck'), joints.index_of('left_shoulder'), joints.index_of('right_shoulder'))
    return extended
def insert_zero_joint(data, ind):
    """ Inserts an all-zero joint at index ``ind`` along the joint axis (axis -2).

    Used to add back a root joint (sitting at the origin) to a hip-relative pose.
    """
    assert data.ndim >= 2
    new_shape = list(data.shape)
    new_shape[-2] += 1
    out = np.zeros(new_shape, dtype=data.dtype)
    out[..., :ind, :] = data[..., :ind, :]
    out[..., ind + 1:, :] = data[..., ind:, :]
    return out
def remove_root(data, root_ind):
    """
    Makes a pose root-relative: subtracts the joint at ``root_ind`` from every
    joint and removes it from the array.
    :param data: (..., nJoints, 2|3) array
    :param root_ind: index of the joint to be removed
    :return: (..., nJoints-1, 2|3) array
    """
    assert data.ndim >= 2 and data.shape[-1] in (2, 3)
    root = data[..., [root_ind], :]  # (..., 1, 2|3); keeps the dim for broadcasting
    relative = data - root
    return np.delete(relative, root_ind, axis=-2)
def remove_root_keepscore(data, root_ind):
    """
    Makes a 2D pose root-relative while keeping the detection scores intact.
    The difference to remove_root is that the third channel stores the
    confidence score and is not changed.

    NOTE: the x/y coordinates of ``data`` are shifted in place.

    :param data: (nPoses, nJoints, 3[x,y,score]) array
    :param root_ind: index of the joint to be removed
    :return: (nPoses, nJoints-1, 3[x,y,score]) array
    """
    assert data.ndim >= 3 and data.shape[-1] == 3, data.shape
    root_xy = data[..., [root_ind], :2]  # (..., 1, 2); fancy indexing copies
    data[..., :2] -= root_xy
    return np.delete(data, root_ind, axis=-2)
def combine_pose_and_trans(data3d, std3d, mean3d, joint_set, root_name, log_root_z=True):
    """
    3D result postprocess: unnormalizes data3d and reconstructs the absolute pose from relative + absolute split.

    Parameters:
        data3d: output of the PyTorch model, ndarray(nPoses, 3*nJoints), in the format created by preprocess3d
        std3d: normalization standard deviations
        mean3d: normalization means
        root_name: name of the root joint
        log_root_z: The z coordinate of the depth is in logarithms

    Returns:
        ndarray(nPoses, nJoints, 3)
    """
    assert_shape(data3d, (None, joint_set.NUM_JOINTS * 3))
    denormed = data3d * std3d + mean3d
    # the last three values carry the absolute root, the rest the root-relative joints
    root = denormed[:, -3:]
    rel_pose = denormed[:, :-3].reshape((len(denormed), joint_set.NUM_JOINTS - 1, 3))
    if log_root_z:
        root[:, 2] = np.exp(root[:, 2])
    # shift the relative joints to absolute coordinates, then splice the root back in
    abs_pose = rel_pose + root[:, np.newaxis, :]
    root_ind = joint_set.index_of(root_name)
    return np.insert(abs_pose, root_ind, root, axis=1).astype('float32')
def pose_interp(poses, good_frames):
    """
    Interpolates invisible poses linearly over time, coordinate by coordinate.

    :param poses: (nPoses, nJoints, 3), the joint coordinates
    :param good_frames: (nPoses), true if the pose is detected on the frame, false otherwise
    :return: (nPoses, nJoints, 3), the interpolated poses; the input array is not modified
    """
    assert len(poses) == len(good_frames)
    assert poses.ndim == 3
    poses = poses.copy()
    frame_inds = np.arange(len(poses))
    for i in range(poses.shape[1]):
        for j in range(poses.shape[2]):
            # interpolate poses[:,i,j] over the bad frames from the good ones
            poses[~good_frames, i, j] = np.interp(
                frame_inds[~good_frames], frame_inds[good_frames], poses[good_frames, i, j])
    return poses
HEIGHT_BONES = [['left_ankle', 'left_knee'], ['left_hip', 'left_knee'], ['hip', 'spine'], ['spine', 'neck']]
def _calc_limb_length(poses, joint_set, bones):
    """
    Calculates the length of a limb that contains multiple bones.

    :param poses: ('*', nJoints, 3) array of poses
    :param joint_set: JointSet describing the joint order of `poses`
    :param bones: list of (joint1, joint2) pairs, where joint1 and joint2 determines the bone.
    :return: For each pose, the sum of the lengths of the bones in `bones`
    """
    assert_shape(poses, ('*', joint_set.NUM_JOINTS, 3))
    bone_inds = [[joint_set.index_of(j) for j in b] for b in bones]
    # Note: the previous version reused the `bones` parameter as the loop variable,
    # clobbering the argument; renamed for clarity (behavior unchanged).
    total_len = np.zeros(poses.shape[:-2], dtype='float32')
    for start, end in bone_inds:
        diff = poses[..., start, :] - poses[..., end, :]  # (shapePose, 3)
        total_len += np.linalg.norm(diff, axis=-1)  # (shapePose)
    return total_len
def pck(pred, gt, thresh):
    """ Percentage of keypoints less than thresh mm away from the GT. """
    dists = np.linalg.norm(pred - gt, axis=-1)
    return (dists < thresh).mean()
# PCK thresholds in millimeters, matching the MuPoTS evaluation script
AUC_THRESHOLDS = np.arange(0, 151, 5)


def auc(pred, gt, thresholds=AUC_THRESHOLDS):
    """ Calculates AUC of PCK. The default thresholds are the ones used by the MuPoTS evaluation script.

    :param pred: ('*', nJoints, 3) predicted poses
    :param gt: array of the same shape as `pred` with the ground truth
    :param thresholds: iterable of distance thresholds (mm) to average PCK over
    """
    errors = np.linalg.norm(pred - gt, axis=-1)
    # Bug fix: the previous implementation ignored the `thresholds` argument
    # and always iterated over AUC_THRESHOLDS.
    return np.mean([np.mean(errors < t) for t in thresholds])
def mpjpe(pred, gt):
    """ Mean per-joint position error: average Euclidean distance over all joints and poses. """
    assert_shape(pred, ('*', None, 3))
    assert pred.shape == gt.shape
    per_joint_error = np.linalg.norm(gt - pred, axis=-1)
    return per_joint_error.mean()
def r_mpjpe(pred, gt, joint_set):
    """ Root-relative MPJPE: both poses are hip-centered and the hip is dropped before scoring. """
    hip_ind = joint_set.index_of('hip')
    return mpjpe(remove_root(pred, hip_ind), remove_root(gt, hip_ind))
def mrpe(pred, gt, joint_set):
    """ Mean Root Position Error: mean distance between predicted and ground-truth hip joints (NaNs ignored). """
    assert_shape(pred, ('*', None, 3))
    assert pred.shape == gt.shape
    hip_ind = joint_set.index_of('hip')
    assert gt[..., hip_ind, :].shape[-1] == 3
    return np.nanmean(np.linalg.norm(gt[..., hip_ind, :] - pred[..., hip_ind, :], axis=-1))
def optimal_scaling(pred, gt):
    """
    Calculates optimal scaling factor for a given set of points. Optimal scaling is the scalar s,
    with which the pred points scaled become the closest to gt points, in L2 sense.

    :param pred: array(nFrames, nPoints, 3)
    :param gt: array(nFrames, nPoints, 3)
    :return: array(nFrames), one optimal scale factor per frame
    """
    assert pred.shape == gt.shape
    assert_shape(pred, ('*', None, 3))
    # Optimal scale transform: s = <pred, gt> / <pred, pred>, summed over joints and coordinates
    dot_pose_pose = np.sum(pred * pred, axis=(-1, -2)) # (nShape) torch.sum(torch.mul(pred,pred),1,keepdim=True)
    dot_pose_gt = np.sum(pred * gt, axis=(-1, -2))
    return dot_pose_gt / dot_pose_pose # (nShape), the optimal scaling factor s
def rn_mpjpe(pred, gt, root_ind):
    """
    N-MPJPE, when optimal scaling factor is calculated on relative pose.
    This should be a good comparison to height based scaling.
    Based on https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning, losses/poses.py
    """
    assert pred.shape == gt.shape
    assert_shape(pred, ('*', None, 3))
    # the scale is fitted on root-relative poses but applied to the absolute ones
    s_opt = optimal_scaling(remove_root(pred, root_ind), remove_root(gt, root_ind))
    return mpjpe(pred * s_opt[..., np.newaxis, np.newaxis], gt)
def n_mpjpe(pred, gt):
    """
    MPJPE after the prediction is rescaled by the L2-optimal scalar towards the ground truth.
    Based on https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning, losses/poses.py
    """
    assert pred.shape == gt.shape
    assert_shape(pred, ('*', None, 3))
    scale = optimal_scaling(pred, gt)[..., np.newaxis, np.newaxis]
    return mpjpe(scale * pred, gt)
| 7,952 | 30.939759 | 113 | py |
pose_refinement | pose_refinement-master/src/util/viz.py | """Functions to visualize human poses"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, ImageMagickWriter
from mpl_toolkits.mplot3d import proj3d
import cv2
def get_3d_axes(*subplot):
    """
    Creates a 3D Matplotlib axis. The arguments are the same as of the `subplot` function of Matplotlib.
    """
    # with no arguments fall back to a single full-figure subplot
    args = subplot if subplot else (1, 1, 1)
    return plt.subplot(*args, projection='3d')
# alias to get_3d_axes, mirroring the matplotlib ``subplot`` name
subplot = get_3d_axes
def add3Dpose(pose, ax, joint_set, lcolor="#d82f2f", rcolor="#e77d3c", ccolor="#2e78d8", line_width=2):
    """
    Plots a 3d skeleton on ``ax``.

    Parameters:
        pose: (nJoints, 3) ndarray. The pose to plot.
        ax: matplotlib 3d axis to draw on
        joint_set: JointSet object describing the joint orders.
        lcolor: color for left part of the body
        rcolor: color for right part of the body
        ccolor: color for the center of the body
        line_width: width of the limbs on the plot
    """
    assert pose.shape == (joint_set.NUM_JOINTS, 3), pose.shape
    # sidedness codes: 0-right, 1-left, 2-center
    sidedness = np.array(joint_set.SIDEDNESS, dtype='int32')
    limb_colors = [rcolor, lcolor, ccolor]
    # draw each limb, colored by which side of the body it belongs to
    for limb_ind, limb in enumerate(joint_set.LIMBGRAPH):
        xs = pose[limb, 0]
        ys = pose[limb, 1]
        zs = pose[limb, 2]
        # Matplotlib uses z as the vertical axis and y as depth, so swap them
        ax.plot(xs, zs, ys, lw=line_width, c=limb_colors[sidedness[limb_ind]])
def show3Dpose(poses, joint_set, ax=None, lcolor="#d82f2f", rcolor="#e77d3c", ccolor="#2e78d8", color=None,
               add_labels=False, show_numbers=False,
               set_axis=True, hide_panes=False, radius=None,
               linewidth=2, invert_vertical=False):
    """
    Visualize a 3d skeleton. By default, left side is red, right is orange.

    Parameters:
        poses: ([nPoses], nJoints, 3) ndarray. The poses to plot. The joints must be in x,y,z (horizontal, vertical, depth) order.
        joint_set: JointSet object describing the joint orders.
        ax: matplotlib 3d axis to draw on, if None a new one is created
        lcolor: color for left part of the body
        rcolor: color for right part of the body
        ccolor: color for the center of the body (spine, head, neck)
        color: color of the whole skeleton. If specified, overwrites l/r/ccolor.
        add_labels: whether to add coordinates to the plot
        show_numbers: whether to show axis labels or not
        set_axis: if True, the limits of the plot axes are automatically set based on `poses`.
        radius: if set_axis is true, the half of the length of the viewport cube. If None, it is automatically inferred from data
        linewidth: width of the limbs on the plot
        invert_vertical: if true, the vertical axis grows downwards.
    """
    # accept a single pose as well as a batch of poses
    if poses.ndim == 2:
        poses = np.expand_dims(poses, 0)
    assert poses.shape[1:] == (joint_set.NUM_JOINTS, 3), poses.shape
    if color is not None:
        rcolor = lcolor = ccolor = color
    if ax is None:
        ax = get_3d_axes()
    for pose in poses:
        add3Dpose(pose, ax, joint_set, lcolor, rcolor, ccolor, linewidth)
    if set_axis:
        # space around the subject, automatically detect if it is in meters or mms
        if radius is None:
            radius = 1.500 if np.max(poses[0, :, 0]) - np.min(poses[0, :, 0]) < 5 else 1500
        xroot, yroot, zroot = np.nanmean(poses[:, joint_set.index_of("hip"), :], axis=0)
        # y and z are swapped in the limits because Matplotlib's z axis is vertical
        ax.set_xlim3d([-radius + xroot, radius + xroot])
        ax.set_ylim3d([-radius + zroot, radius + zroot])
        ax.set_zlim3d([-radius + yroot, radius + yroot])
    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("z")
        ax.set_zlabel("y")
    # Get rid of the ticks and tick labels
    if not show_numbers:
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_zticks([])
        ax.get_xaxis().set_ticklabels([])
        ax.get_yaxis().set_ticklabels([])
        ax.set_zticklabels([])
    if hide_panes:
        # fully transparent white hides panes and axis lines
        white = (1.0, 1.0, 1.0, 0.0)
        ax.w_xaxis.set_pane_color(white)
        ax.w_yaxis.set_pane_color(white)
        ax.w_zaxis.set_pane_color(white)
        # Keep z pane
        # Get rid of the lines in 3d
        ax.w_xaxis.line.set_color(white)
        ax.w_yaxis.line.set_color(white)
        ax.w_zaxis.line.set_color(white)
    if invert_vertical:
        ax.invert_zaxis()
    ax.set_aspect('auto')
def show2Dpose(pose, joint_set, ax=None, lcolor="#d82f2f", rcolor="#e77d3c", ccolor="#2e78d8", color=None, add_labels=False,
               show_numbers=False, line_width=2, radius=500):
    """
    Visualize a 2d skeleton.

    Parameters:
        pose: (nJoints, 2) ndarray, the pose to draw.
        joint_set: The JointSet object that describes the joint order.
        ax: matplotlib axis to draw on
        lcolor: color for left part of the body
        rcolor: color for right part of the body
        ccolor: color for the middle of the body
        color: color of the whole skeleton, overrides l/r/ccolor if specified
        add_labels: whether to add coordinate labels
        show_numbers: whether to show axis labels or not
        line_width: width of the plotted lines in pixels
        radius: half-width of the output image
    """
    assert pose.shape == (joint_set.NUM_JOINTS, 2), "Unexpected shape for pose:" + str(pose.shape)
    if color is not None:
        rcolor = lcolor = ccolor = color
    if ax is None:
        ax = plt.gca()
    # 0-right, 1-left, 2-center
    side = np.array(joint_set.SIDEDNESS, dtype='int32')
    colors = [rcolor, lcolor, ccolor]
    # Make connection matrix
    for i, limb in enumerate(joint_set.LIMBGRAPH):
        x = pose[limb, 0]
        y = pose[limb, 1]
        ax.plot(x, y, lw=line_width, c=colors[side[i]])
    if not show_numbers:
        # Get rid of the ticks
        ax.set_xticks([])
        ax.set_yticks([])
        # Get rid of tick labels
        ax.get_xaxis().set_ticklabels([])
        ax.get_yaxis().set_ticklabels([])
    # set space around the subject
    hip_ind = joint_set.index_of('hip')
    xroot, yroot = pose[hip_ind, 0], pose[hip_ind, 1]
    ax.set_xlim([-radius + xroot, radius + xroot])
    ax.set_ylim([-radius + yroot, radius + yroot])
    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
    ax.set_aspect('equal')
    # invert y so the plot matches image coordinates (y grows downwards)
    ax.invert_yaxis()
def draw_points(img, points, radius, color, thickness=-1):
    """
    Draws a set of points on img, also does rounding if necessary.

    :param points: (nPoints, 2) array of x, y coordinates
    :return: the input image with the points drawn on it
    """
    assert points.ndim == 2 and points.shape[1] == 2
    for x, y in np.around(points).astype('int32'):
        cv2.circle(img, (x, y), radius, color, thickness)
    return img
def draw_bboxes_ltrb(img, bboxes, color, thickness):
    """
    Draws bounding boxes in an image.

    :param bboxes: array[nBoxes, (left, top, right, bottom, [score])] score is optional
    """
    assert bboxes.ndim == 2 and bboxes.shape[1] in (4, 5)
    # round everything up front; the optional score column is simply ignored
    for box in np.around(bboxes).astype('int32'):
        cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, thickness)
def draw2Dpose(frame, pose, joint_set, lcolor=(216, 47, 47), rcolor=(231, 125, 60), ccolor=(46, 120, 216), line_width=2,
               color=None, score_threshold=0.4):
    """
    Draws a 2d skeleton on an image. Optionally filters points and edges by scores.

    Parameters:
        frame: the image to draw on
        pose: (nJoints, [x,y,score]) vector, score is optional.
        joint_set: JointSet object describing the joint order.
        lcolor: color for left part of the body
        rcolor: color for right part of the body
        ccolor: color for the middle of the body
        color: color of the full skeleton, overrides previous colors if specified
        line_width: width of the plotted lines in pixels
        score_threshold: joints with scores lower than this value are not drawn. Only applicable if ``pose`` contains scores.
    """
    assert pose.shape == (joint_set.NUM_JOINTS, 3) or pose.shape == (joint_set.NUM_JOINTS, 2), \
        "pose must have a shape of (%d, 2|3), it has %s instead" % (joint_set.NUM_JOINTS, str(pose.shape))
    coords = np.around(pose[:, :2]).astype('int32')
    # cut scores from pose; without scores every joint counts as fully visible
    if pose.shape[-1] == 3:
        scores = pose[:, 2]
    else:
        scores = np.ones(pose.shape[:-1])
    if color is not None:
        rcolor = lcolor = ccolor = color
    # 0-right, 1-left, 2-center
    side = np.array(joint_set.SIDEDNESS, dtype='int32')
    colors = [rcolor, lcolor, ccolor]
    # Draw joints
    for i, p in enumerate(coords):
        if scores[i] > score_threshold:
            cv2.circle(frame, (p[0], p[1]), 2 * line_width, (255, 0, 0), -1)
    # Draw limbs
    for i, limb in enumerate(joint_set.LIMBGRAPH):
        x = coords[limb, 0]
        y = coords[limb, 1]
        # Draw limb if both points were found
        if np.all(scores[list(limb)] > score_threshold):
            cv2.line(frame, (x[0], y[0]), (x[1], y[1]), colors[side[i]], line_width, cv2.LINE_AA)
    # ax.plot(x, y, lw=line_width, c=)
def generate_rotating_pose(out_file, pose, joint_set, initial_func=None):
    """
    Creates a gif that rotates the camera around `pose`. You might want to call ``plt.ioff()`` before
    using this function.

    Parameters:
        out_file: output file name
        pose: ndarray(nJoints,3), coordinates are in x, y, z order. Y grows downwards.
        joint_set: joint order of `pose`
        initial_func: if not None, a function that generates the initial plot that is going to be rotated. If provided, pose and
                      joint_set must be None
    """
    if initial_func is not None:
        assert pose is None, "if initial_func is set, pose must be None"
        assert joint_set is None, "if initial_func is set, joint_set must be None"
    # camera path parameters: sweep azimuth out and back, then elevation out and back
    start_azim = -190
    end_azim = -0
    azim_steps = 30
    start_elev = 5
    end_elev = 70
    elev_steps = 15
    total_steps = azim_steps * 2 + elev_steps * 2
    def update(i):
        # piecewise-linear interpolation of the camera angle over the frame index:
        # middle->start->end->middle in azimuth, then the same in elevation
        middle_azim = (start_azim + end_azim) / 2
        middle_elev = (start_elev + end_elev) / 2
        if i < azim_steps / 2:
            ax.view_init(azim=middle_azim + (start_azim - middle_azim) * i / azim_steps * 2, elev=middle_elev)
        if azim_steps / 2 <= i < azim_steps * 1.5:
            ax.view_init(azim=start_azim + (end_azim - start_azim) * (i - azim_steps / 2) / azim_steps, elev=middle_elev)
        elif azim_steps * 1.5 <= i < azim_steps * 2:
            ax.view_init(azim=end_azim + (middle_azim - end_azim) * (i - azim_steps * 1.5) / azim_steps * 2, elev=middle_elev)
        elif azim_steps * 2 <= i < azim_steps * 2 + elev_steps / 2:
            ax.view_init(azim=middle_azim, elev=middle_elev + (start_elev - middle_elev) * (i - azim_steps * 2) / elev_steps * 2)
        elif azim_steps * 2 + elev_steps / 2 <= i < azim_steps * 2 + elev_steps * 1.5:
            ax.view_init(azim=middle_azim, elev=start_elev + (end_elev - start_elev) *
                         (i - azim_steps * 2 - elev_steps / 2) / elev_steps)
        elif azim_steps * 2 + elev_steps * 1.5 <= i:
            ax.view_init(azim=middle_azim, elev=end_elev + (middle_elev - end_elev) *
                         (i - azim_steps * 2 - elev_steps * 1.5) / elev_steps * 2)
    def first_frame():
        # default initial plot: the skeleton centered on the hip in a fixed-size viewport
        show3Dpose(pose, joint_set, ax, invert_vertical=True, linewidth=1, set_axis=False, hide_panes=False)
        RADIUS = 900
        xroot, yroot, zroot = np.mean(pose[:, joint_set.index_of('hip'), :], axis=0)
        ax.set_xlim3d([-RADIUS + xroot, RADIUS + xroot])
        ax.set_ylim3d([-RADIUS + zroot, RADIUS + zroot])
        ax.set_zlim3d([-RADIUS + yroot, RADIUS + yroot])
        ax.invert_zaxis()
        ax.set_aspect('equal')
    if initial_func is None:
        initial_func = first_frame
    ax = get_3d_axes()
    plt.tight_layout()
    fps = 10
    anim = FuncAnimation(plt.gcf(), update, frames=total_steps,
                         interval=1000 / fps, init_func=initial_func, repeat=False)
    writer = ImageMagickWriter(fps=fps)
    anim.save(out_file, writer=writer)
| 12,276 | 35.322485 | 129 | py |
pose_refinement | pose_refinement-master/src/util/__init__.py | 0 | 0 | 0 | py | |
pose_refinement | pose_refinement-master/src/scripts/generate_muco_temp.py | """
generates the muco_temp synthetic dataset. In order to use this script, you already have to have to have generated
the sequence meta data files in 'sequence_meta.pkl' and the ground-truth poses. The scripts can be found in mpi_inf_3dhp.ipynb
"""
from databases import mpii_3dhp, muco_temp
from databases.joint_sets import MuPoTSJoints
import numpy as np
from util.misc import ensuredir, load
import os
import cv2
from multiprocessing import Pool
NUM_FRAMES = 2000
def generate_vid_frames(cam, vid_id):
    """
    Composites the frames of one synthetic MuCo-Temp video by layering the single-person
    MPI-INF-3DHP recordings listed in ``sequence_metas[cam][vid_id]`` back-to-front (by hip depth)
    and writes them as JPEGs under ``MUCO_TEMP_PATH/frames/cam_<cam>/vid_<vid_id>``.

    :param cam: camera index
    :param vid_id: index of the generated video for this camera
    """
    print(cam, vid_id)
    metas = sequence_metas[cam][vid_id]
    # 50 fps sequences are subsampled by 2 so every component advances at the same rate
    steps = [2 if mpii_3dhp.get_train_fps(meta[0], meta[1]) == 50 else 1 for meta in metas]
    out_folder = os.path.join(muco_temp.MUCO_TEMP_PATH, 'frames/cam_%d/vid_%d' % (cam, vid_id))
    ensuredir(out_folder)
    gt_poses = load(os.path.join(muco_temp.MUCO_TEMP_PATH, 'frames/cam_%d/gt.pkl' % cam))[vid_id]['annot3']
    hip_ind = MuPoTSJoints().index_of('hip')
    for i in range(NUM_FRAMES):
        # generate frame
        depths = gt_poses[i, :, hip_ind, 2]
        ordered_poses = np.argsort(depths)[::-1]  # poses ordered by depth in decreasing order
        # the farthest pose's frame serves as the background
        bg_ind = ordered_poses[0]
        img = mpii_3dhp.get_image(metas[bg_ind][0], metas[bg_ind][1], cam, metas[bg_ind][2] + i * steps[bg_ind], rgb=False)
        img = img.astype('float32')
        # add new pose onto image
        for pose_ind in ordered_poses[1:]:
            sub, seq, start = metas[pose_ind]
            pose_img = mpii_3dhp.get_image(sub, seq, cam, start + i * steps[pose_ind], rgb=False)
            # mask is 0 at greenscreen bg, 1 at foreground (body, chair)
            mask = mpii_3dhp.get_mask(sub, seq, cam, start + i * steps[pose_ind], 'FGmasks')[:, :, 2] / 255.
            mask = cv2.GaussianBlur(mask, (0, 0), 2)[:, :, np.newaxis]
            # chair_mask is 0 at chair, 1 everywhere else
            chair_mask = mpii_3dhp.get_mask(sub, seq, cam, start + i * steps[pose_ind], 'ChairMasks')[:, :, [2]] / 255
            # blend: first keep the chair of the new frame, then paste the body on top
            img = chair_mask * img + (1 - chair_mask) * pose_img
            img = mask * pose_img + (1 - mask) * img
        img = img.astype('uint8')
        cv2.imwrite(os.path.join(out_folder, 'img_%04d.jpg' % i), img, [cv2.IMWRITE_JPEG_QUALITY, 80])
if __name__ == '__main__':
    # generate all 11 cameras x 7 videos in parallel on 6 worker processes;
    # sequence_metas is a module-level global read by generate_vid_frames
    sequence_metas = muco_temp.get_metadata()
    p = Pool(6)
    params = [(cam, vid) for cam in range(11) for vid in range(0, 7)]
    p.starmap(generate_vid_frames, params)
| 2,445 | 41.912281 | 126 | py |
pose_refinement | pose_refinement-master/src/scripts/maskrcnn_bboxes.py | """ Generates Mask-RCNN bounding boxes. """
import argparse
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.config import get_cfg
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model
import torch
import os
from torch.utils.data import DataLoader, Dataset
from util.misc import save
def get_kpdetection_conf():
    """ Builds the Detectron2 config for the COCO keypoint R-CNN model with model-zoo weights. """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
    return cfg
def get_model(cfg):
    """ Builds the detection model described by `cfg`, loads its weights and switches it to eval mode. """
    model = build_model(cfg)
    model.eval()
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    return model
class ImgDirDataset(Dataset):
    """ Torch Dataset over every image file in a folder, yielding Detectron2-style input dicts. """
    def __init__(self, folder, transform):
        # folder: directory containing the images; transform: a Detectron2 TransformGen
        self.folder = folder
        self.files = sorted(os.listdir(folder))
        self.transform = transform
    def __len__(self):
        return len(self.files)
    def __getitem__(self, ind):
        """ Returns a dict with the resized CHW float tensor plus the original image size and file name. """
        img = cv2.imread(os.path.join(self.folder, self.files[ind]))
        height, width = img.shape[:2]
        image = self.transform.get_transform(img).apply_image(img)
        # HWC uint8 -> CHW float32 tensor, as expected by the detection model
        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)).contiguous()
        return {"image": image, "height": height, "width": width, "name": self.files[ind]}
def predict_dataset(model, dataset, out_folder, batch_size=16):
    """
    Runs the detector over `dataset` and saves one pickle per image into `out_folder`,
    each containing an (nBoxes, 5) array of [left, top, right, bottom, score].
    """
    # identity collate_fn: the Detectron2 model consumes the raw list of input dicts
    loader = DataLoader(dataset, batch_size, collate_fn=lambda x: x, num_workers=3)
    with torch.no_grad():
        for batch in loader:
            predictions = model(batch)
            for i in range(len(batch)):
                boxes = predictions[i]['instances'].pred_boxes.tensor.cpu().numpy()
                scores = predictions[i]['instances'].scores.cpu().numpy()[:, np.newaxis]
                output = np.concatenate([boxes, scores], axis=1)
                assert output.shape[1] == 5
                save(os.path.join(out_folder, "%s.pkl" % batch[i]['name']), output)
def predict_imgs(input_path, output_path):
    """ Detects person bounding boxes for every frame in `input_path` and writes them to `output_path`. """
    cfg = get_kpdetection_conf()
    model = get_model(cfg)
    # resize transform matching the model's expected test-time input size
    transform_gen = T.ResizeShortestEdge(
        [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
    )
    assert cfg.INPUT.FORMAT == 'BGR'
    dataset = ImgDirDataset(input_path, transform_gen)
    predict_dataset(model, dataset, output_path, batch_size=8)
if __name__ == "__main__":
    # CLI entry point: generate Mask-RCNN bounding boxes for all frames in input_path
    parser = argparse.ArgumentParser()
    parser.add_argument('input_path', help="the path to the input frames")
    parser.add_argument('output_path', help="bboxes will be generated here")
    args = parser.parse_args()
| 3,049 | 29.19802 | 101 | py |
pose_refinement | pose_refinement-master/src/scripts/hrnet_predict.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('../hrnet/lib')
from scripts import hrnet_dataset
# ------------------------------------------------------------------------------
# pose.pytorch
# Copyright (c) 2018-present Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Marton Veges
# ------------------------------------------------------------------------------
import argparse
import time
import os
import pprint
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import numpy as np
from config import cfg
from config import update_config
from core.function import AverageMeter
from utils.utils import create_logger
from core.inference import get_final_preds
from utils.transforms import flip_back
import models
from util.misc import load, ensuredir
def parse_args():
    """ Parses the command line arguments of the HRNet prediction script. """
    parser = argparse.ArgumentParser(description='Train keypoints network')
    parser.add_argument('path', type=str, help="the path to the video frames and bboxes")
    # general
    parser.add_argument('--cfg', type=str, required=False,
                        default='../hrnet/experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml',
                        help='experiment configure file name')
    parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,
                        help="Modify config options using the command-line")
    return parser.parse_args()
def predict_dataset(config, dataset, model):
    """
    Runs HRNet inference over every bounding box in `dataset`, with optional flip-test averaging.

    :param config: HRNet config node (TEST.FLIP_TEST, TEST.SHIFT_HEATMAP, PRINT_FREQ, ...)
    :param dataset: dataset yielding (input, meta) pairs; meta holds center/scale/score/image/origbox
    :param model: the pose network (CUDA)
    :return: (all_preds (N, nJoints, 3[x,y,score]), all_boxes (N, 6), image_names, orig_boxes)
    """
    batch_time = AverageMeter()
    # switch to evaluate mode
    model.eval()
    num_samples = len(dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_names = []
    orig_boxes = []
    # NOTE(review): batch size is read from the module-level `cfg`, not the `config`
    # argument — verify this is intentional
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        pin_memory=True, num_workers=1
    )
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, meta) in enumerate(data_loader):
            # compute output
            outputs = model(input)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs
            if config.TEST.FLIP_TEST:
                # this part is ugly, because pytorch has not supported negative index
                # input_flipped = model(input[:, :, :, ::-1])
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()
                outputs_flipped = model(input_flipped)
                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped
                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
                # feature is not aligned, shift flipped heatmap for higher accuracy
                if config.TEST.SHIFT_HEATMAP:
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]
                # average the straight and flipped heatmaps
                output = (output + output_flipped) * 0.5
            num_images = input.size(0)
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()
            # heatmaps -> image-space joint coordinates + confidences
            preds, maxvals = get_final_preds(config, output.clone().cpu().numpy(), c, s)
            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
            all_boxes[idx:idx + num_images, 5] = score
            names = meta['image']
            image_names.extend(names)
            orig_boxes.extend(meta['origbox'])
            idx += num_images
            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
                          i, len(data_loader), batch_time=batch_time)
                print(msg)
    return all_preds, all_boxes, image_names, orig_boxes
def predict_imgs(model, img_folder, bbox_folder, output_file, normalize, detection_thresh):
    """
    Predicts 2D keypoints for every detected bounding box under `bbox_folder` and writes the
    rescored, NMS-filtered result to `output_file` (json or npy, see rescore_and_save_result).

    :param model: the HRNet pose model
    :param img_folder: glob pattern of the input frames
    :param bbox_folder: folder of per-image pickled (nBoxes, 5) detection arrays
    :param normalize: torchvision transform applied to the image crops
    :param detection_thresh: boxes scoring below this are dropped
    """
    detections = {}
    for file in sorted(os.listdir(bbox_folder)):
        dets = load(os.path.join(bbox_folder, file))
        assert dets.shape[1] == 5
        img_name = file[:-4]  # remove extension
        detections[img_name] = dets
    valid_dataset = hrnet_dataset.ImgFolderDataset(cfg, img_folder, detections,
                                                   normalize, detection_thresh)
    start = time.time()
    preds, boxes, image_names, orig_boxes = predict_dataset(cfg, valid_dataset, model)
    end = time.time()
    print("Time in prediction: " + str(end - start))
    ensuredir(os.path.dirname(output_file))
    valid_dataset.rescore_and_save_result(output_file, preds, boxes, image_names, orig_boxes)
def predict(cfg_path, img_dir, bbox_dir, out_file, param_overrides=[]):
    """
    Loads the HRNet model described by `cfg_path` and predicts keypoints for every frame.

    :param cfg_path: path to the HRNet experiment yaml
    :param img_dir: directory of input frames (a '/*' glob suffix is appended below)
    :param bbox_dir: directory of per-image pickled bounding boxes
    :param out_file: output file for the keypoints (json/npy)
    :param param_overrides: extra config overrides in the yacs key/value list format
        NOTE(review): mutable default argument — it is only read here, but consider `None`
    """
    # update_config needs some hardcoded params, fake them here
    class args:
        cfg = cfg_path
        opts = param_overrides
        modelDir = ''
        logDir = ''
        dataDir = ''
    update_config(cfg, args)
    # the config must be unfrozen to apply the test-time overrides below
    cfg.defrost()
    cfg.TEST.MODEL_FILE = '../hrnet/pose_hrnet_w32_256x192.pth'
    cfg.TEST.USE_GT_BBOX = False
    cfg.TEST.BATCH_SIZE_PER_GPU = 64
    cfg.GPUS = (0,)
    cfg.freeze()
    logger, final_output_dir, tb_log_dir = create_logger(cfg, cfg_path, 'valid')
    logger.info(pprint.pformat(args))
    logger.info(cfg)
    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
    # the model class is selected by name from the models package
    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)
    model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    normalize = transforms.Compose([transforms.ToTensor(), normalize])
    detection_thresh = 0.8
    img_dir = os.path.join(img_dir, '*') # Dataset requires a glob format
    predict_imgs(model, img_dir, bbox_dir, out_file, normalize, detection_thresh)
if __name__ == '__main__':
    # CLI entry point: expects args.path to contain 'frames' and 'bboxes' subfolders
    args = parse_args()
    img_dir = os.path.join(args.path, 'frames')
    bbox_dir = os.path.join(args.path, 'bboxes')
    out_file = os.path.join(args.path, 'keypoints.json')
predict(args.cfg, img_dir, bbox_dir, out_file, param_overrides=args.opts) | 7,588 | 33.03139 | 95 | py |
pose_refinement | pose_refinement-master/src/scripts/hrnet_dataset.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Marton Veges
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import logging
import os
from util.misc import ensuredir
import json_tricks as json
import numpy as np
import copy
import cv2
import glob
from torch.utils.data import Dataset
from utils.transforms import get_affine_transform
from nms.nms import oks_nms
from nms.nms import soft_oks_nms
logger = logging.getLogger(__name__)
class BaseDataset(Dataset):
    """
    A DataLoader loading bounding boxes for CoCo joints evaluation.
    "keypoints": {
        0: "nose",
        1: "left_eye",
        2: "right_eye",
        3: "left_ear",
        4: "right_ear",
        5: "left_shoulder",
        6: "right_shoulder",
        7: "left_elbow",
        8: "right_elbow",
        9: "left_wrist",
        10: "right_wrist",
        11: "left_hip",
        12: "right_hip",
        13: "left_knee",
        14: "right_knee",
        15: "left_ankle",
        16: "right_ankle"
    },
    "skeleton": [
        [16,14],[14,12],[17,15],[15,13],[12,13],[6,12],[7,13], [6,7],[6,8],
        [7,9],[8,10],[9,11],[2,3],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7]]
    """
    def __init__(self, cfg):
        # Unpack NMS threshold parameters
        self.image_thre = cfg.TEST.IMAGE_THRE # bounding boxes lower than this value are not predicted, just thrown away
        self.soft_nms = cfg.TEST.SOFT_NMS
        self.oks_thre = cfg.TEST.OKS_THRE
        self.in_vis_thre = cfg.TEST.IN_VIS_THRE
        # Unpack image size parameters
        self.image_width = cfg.MODEL.IMAGE_SIZE[0]
        self.image_height = cfg.MODEL.IMAGE_SIZE[1]
        self.image_size = np.array(cfg.MODEL.IMAGE_SIZE)
        self.aspect_ratio = self.image_width * 1.0 / self.image_height
        self.pixel_std = 200
        self.color_rgb = cfg.DATASET.COLOR_RGB
        self.num_joints = 17
        # left/right joint index pairs swapped during flip-test
        self.flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8],
                           [9, 10], [11, 12], [13, 14], [15, 16]]
    def _lurb2cs(self, box):  # TODO(review): verify the left-up-right-bottom conversion
        """ Converts a (left, up, right, bottom) box to HRNet's center/scale format. """
        x, y = box[:2]
        w = box[2] - box[0]
        h = box[3] - box[1]
        return self._xywh2cs(x, y, w, h)
    def _box2cs(self, box):
        """ Converts an (x, y, w, h) box to HRNet's center/scale format. """
        x, y, w, h = box[:4]
        return self._xywh2cs(x, y, w, h)
    def _xywh2cs(self, x, y, w, h):
        """ Converts x/y/w/h to a box center and a scale padded to the model's aspect ratio. """
        center = np.zeros((2), dtype=np.float32)
        center[0] = x + w * 0.5
        center[1] = y + h * 0.5
        # grow the smaller side so the crop matches the network's aspect ratio
        if w > self.aspect_ratio * h:
            h = w * 1.0 / self.aspect_ratio
        elif w < self.aspect_ratio * h:
            w = h * self.aspect_ratio
        scale = np.array([w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std], dtype=np.float32)
        if center[0] != -1:
            scale = scale * 1.25
        return center, scale
    def rescore_and_save_result(self, output_file, preds, all_boxes, img_path, orig_boxes):
        """
        Rescores each detection by its mean visible-keypoint confidence, applies (soft-)OKS NMS
        per image and writes the kept poses to `output_file` via _write_keypoint_results.
        """
        assert output_file.endswith('.json') or output_file.endswith('.npy'), "Only json and numpy output is supported"
        ensuredir(os.path.dirname(output_file))
        # person x (keypoints)
        _kpts = []
        for idx, kpt in enumerate(preds):
            _kpts.append({
                'keypoints': kpt,
                'center': all_boxes[idx][0:2],
                'scale': all_boxes[idx][2:4],
                'area': all_boxes[idx][4],
                'score': all_boxes[idx][5],
                'image': img_path[idx],
                'origbox': orig_boxes[idx]
            })
        # image x person x (keypoints)
        kpts = defaultdict(list)
        for kpt in _kpts:
            kpts[kpt['image']].append(kpt)
        # rescoring and oks nms
        num_joints = self.num_joints
        in_vis_thre = self.in_vis_thre
        oks_thre = self.oks_thre
        # NOTE(review): oks_nmsed_kpts is filled but never used after the loop
        oks_nmsed_kpts = []
        nmsed_kpts_by_frame = defaultdict(list)
        for img in kpts.keys():
            img_kpts = kpts[img]
            for n_p in img_kpts:
                box_score = n_p['score']
                kpt_score = 0
                valid_num = 0
                # average confidence over the keypoints that pass the visibility threshold
                for n_jt in range(0, num_joints):
                    t_s = n_p['keypoints'][n_jt][2]
                    if t_s > in_vis_thre:
                        kpt_score = kpt_score + t_s
                        valid_num = valid_num + 1
                if valid_num != 0:
                    kpt_score = kpt_score / valid_num
                # rescoring
                n_p['score'] = kpt_score * box_score
            if self.soft_nms:
                keep = soft_oks_nms([img_kpts[i] for i in range(len(img_kpts))], oks_thre)
            else:
                keep = oks_nms([img_kpts[i] for i in range(len(img_kpts))], oks_thre)
            # NMS keeping nothing falls back to keeping every pose on the image
            if len(keep) == 0:
                selected_kpts = img_kpts
            else:
                selected_kpts = [img_kpts[_keep] for _keep in keep]
            oks_nmsed_kpts.append(selected_kpts)
            nmsed_kpts_by_frame[img] = selected_kpts
        self._write_keypoint_results(nmsed_kpts_by_frame, output_file)
    def _write_keypoint_results(self, keypoints, output_file):
        """
        Writes the per-image keypoint dicts either as json (all images, values listified)
        or as a stacked .npy array (single-pose 'videocap#<n>' frames only).
        """
        # TODO turn list into numpy arrays
        if output_file.endswith('.json'):
            # Convert numpy arrays to Python lists
            for img_name, poses in keypoints.items():
                for pose in poses:
                    pose['center'] = pose['center'].tolist()
                    pose['scale'] = pose['scale'].tolist()
                    pose['keypoints'] = pose['keypoints'].ravel().tolist()
                    pose['origbox'] = pose['origbox'].tolist()
            with open(output_file, 'w') as f:
                json.dump(keypoints, f, sort_keys=True, indent=4)
        elif output_file.endswith('npy'):
            frame_ind = keypoints.keys()
            assert all([f.startswith('videocap#') for f in frame_ind])
            # order frames by their numeric suffix
            frame_ind = sorted(frame_ind, key=lambda x: int(x[len('videocap#'):]))
            kps = []
            for f in frame_ind:
                assert len(keypoints[f]) == 1, 'Only images with a single pose are supported in numpy save mode, found: ' + str(
                    keypoints[f])
                kps.append(keypoints[f][0]['keypoints'])
            kps = np.stack(kps, axis=0)
            print("shape:" + str(kps.shape))
            np.save(output_file, kps)
        else:
            raise NotImplementedError('Unknown file ending: ' + output_file)
class ImgFolderDataset(BaseDataset):
    """Dataset over a folder of images with externally supplied detections."""

    def __init__(self, cfg, img_path, dets, transform, det_threshold):
        """
        :param cfg: config object
        :param img_path: path to folder, must be glob (e.g. *.jpg)
        :param dets: detections img->boxes
        :param transform: transformations to apply on images
        :param det_threshold: boxes scoring below this are dropped
        """
        super(ImgFolderDataset, self).__init__(cfg)
        self.img_paths = sorted(glob.glob(img_path))
        self.basedir = os.path.dirname(img_path)
        self.dets = dets
        self.transform = transform
        self.image_thre = det_threshold

        self.db = self._prepare_db()

        # one-image read cache and sequential-access bookkeeping
        self.last_idx_read = None
        self.last_img_read = None
        self.last_img = None

    def _prepare_db(self):
        """Build the flat detection database from ``self.dets``, dropping
        bounding boxes whose score is below ``self.image_thre``."""
        kpt_db = []
        kept_boxes = 0
        total_boxes = 0
        for img_name in sorted(self.dets.keys()):
            img_boxes = self.dets[img_name]
            total_boxes += len(img_boxes)
            for det in img_boxes:
                det_score = det[4]
                if det_score < self.image_thre:
                    continue
                kept_boxes += 1
                box_center, box_scale = self._lurb2cs(det[:4])
                kpt_db.append({
                    'image': img_name,
                    'center': box_center,
                    'scale': box_scale,
                    'score': det_score,
                    'origbox': det[:4]
                })
        logger.info('=> Total boxes: {}'.format(total_boxes))
        logger.info('=> Total boxes after filter low score@{}: {}'.format(self.image_thre, kept_boxes))
        return kpt_db

    def _get_img(self, img_name):
        """Read ``img_name`` from disk, serving repeats from a one-image cache."""
        if self.last_img_read == img_name:
            return self.last_img

        img = cv2.imread(os.path.join(self.basedir, img_name))
        assert img is not None, "could not find " + img_name
        if self.color_rgb:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        self.last_img_read = img_name
        self.last_img = img
        return img

    def __len__(self):
        return len(self.db)

    def __getitem__(self, idx):
        # items must be requested sequentially so the single-image cache works
        assert self.last_idx_read is None or self.last_idx_read == idx - 1, "idx junmp: %d -> %d" % (self.last_idx_read, idx)
        db_rec = copy.deepcopy(self.db[idx])
        self.last_idx_read = idx

        image_file = db_rec['image']
        frame = self._get_img(image_file)
        if frame is None:
            logger.error('=> fail to read {}'.format(image_file))
            raise ValueError('Fail to read {}'.format(image_file))

        c = db_rec['center']
        s = db_rec['scale']
        score = db_rec.get('score', 1)
        r = 0

        # crop/resize the detection into the network input resolution
        trans = get_affine_transform(c, s, r, self.image_size)
        input = cv2.warpAffine(frame, trans, (int(self.image_size[0]), int(self.image_size[1])), flags=cv2.INTER_LINEAR)

        if self.transform:
            input = self.transform(input)

        meta = {
            'image': image_file,
            'origbox': db_rec['origbox'],
            'center': c,
            'scale': s,
            'rotation': r,
            'score': score
        }
        return input, meta
| 10,625 | 32.415094 | 128 | py |
pose_refinement | pose_refinement-master/src/scripts/eval.py | #!/usr/bin/python3
"""
Evaluates a (not end2end) model on MuPo-TS
"""
import argparse
import os
import numpy as np
import torch
from util.misc import load
from databases import mupots_3d
from databases.datasets import PersonStackedMuPoTsDataset
from databases.joint_sets import MuPoTSJoints, CocoExJoints
from model.pose_refinement import optimize_poses, StackedArrayAllMupotsEvaluator
from model.videopose import TemporalModel
from training.callbacks import TemporalMupotsEvaluator
from training.preprocess import get_postprocessor, SaveableCompose, MeanNormalize3D
LOG_PATH = '../models'
def unstack_mupots_poses(dataset, predictions):
    """ Converts output of the logger to dict of list of ndarrays.

    :param dataset: the PersonStackedMuPoTsDataset the predictions were made on
    :param predictions: dict from sequence id to stacked prediction array
    :return: (pred_2d, pred_3d), each a dict seq id -> per-frame list of pose arrays
    """
    # Build the MuPoTS joint -> CocoEx joint index map; -1 marks joints that
    # have no CocoEx counterpart
    COCO_TO_MUPOTS = []
    coco_joints = CocoExJoints()
    for i in range(MuPoTSJoints.NUM_JOINTS):
        try:
            COCO_TO_MUPOTS.append(coco_joints.index_of(MuPoTSJoints.NAMES[i]))
        except AssertionError:
            # index_of asserts when the joint name is unknown; the original
            # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
            COCO_TO_MUPOTS.append(-1)
    COCO_TO_MUPOTS = np.array(COCO_TO_MUPOTS)
    # the 13 body joints used for evaluation must all have a mapping
    assert np.all(COCO_TO_MUPOTS[1:14] >= 0)

    pred_2d = {}
    pred_3d = {}
    for seq in range(1, 21):
        gt = mupots_3d.load_gt_annotations(seq)
        gt_len = len(gt['annot2'])

        pred_2d[seq] = []
        pred_3d[seq] = []
        seq_inds = (dataset.index.seq_num == seq)
        for i in range(gt_len):
            frame_inds = (dataset.index.frame == i)
            valid = dataset.good_poses & seq_inds & frame_inds
            pred_2d[seq].append(dataset.poses2d[valid, :, :2][:, COCO_TO_MUPOTS])
            pred_3d[seq].append(predictions[seq][frame_inds[dataset.good_poses & seq_inds]])

    return pred_2d, pred_3d
def load_model(model_folder):
    """Load a trained TemporalModel and its training config from ``model_folder``."""
    config = load(os.path.join(LOG_PATH, model_folder, 'config.json'))
    weight_path = os.path.join(LOG_PATH, model_folder, 'model_params.pkl')

    weights = torch.load(weight_path)
    # Input/output size calculation is hacky: recovered from the first conv layer
    num_in_features = weights['expand_conv.weight'].shape[1]

    model = TemporalModel(num_in_features, MuPoTSJoints.NUM_JOINTS, config['model']['filter_widths'],
                          dropout=config['model']['dropout'], channels=config['model']['channels'])
    model.cuda()
    model.load_state_dict(weights)
    model.eval()

    return config, model
def get_dataset(config):
    """Build the person-stacked MuPoTS test dataset described by ``config``."""
    scaling = config.get('pose3d_scaling', 'normal')
    return PersonStackedMuPoTsDataset(config['pose2d_type'], scaling, pose_validity='all')
def main(model_name, pose_refine):
    """Evaluate a trained TPN on MuPoTS and print relative/absolute PCK and AUC.

    :param model_name: name of the model folder under LOG_PATH
    :param pose_refine: if True, apply pose refinement on top of the TPN output
    """
    config, m = load_model(model_name)
    test_set = get_dataset(config)

    # restore the preprocessing pipeline fitted at training time
    params_path = os.path.join(LOG_PATH, str(model_name), 'preprocess_params.pkl')
    transform = SaveableCompose.from_file(params_path, test_set, globals())
    test_set.transform = transform

    assert isinstance(transform.transforms[1].normalizer, MeanNormalize3D)
    normalizer3d = transform.transforms[1].normalizer

    post_process_func = get_postprocessor(config, test_set, normalizer3d)

    logger = TemporalMupotsEvaluator(m, test_set, config['model']['loss'], True, post_process3d=post_process_func)
    logger.eval(calculate_scale_free=not pose_refine, verbose=not pose_refine)

    if pose_refine:
        refine_config = load('../models/pose_refine_config.json')
        pred = np.concatenate([logger.preds[i] for i in range(1, 21)])
        pred = optimize_poses(pred, test_set, refine_config)
        l = StackedArrayAllMupotsEvaluator(pred, test_set, True)
        l.eval(calculate_scale_free=True, verbose=True)

        # regroup the flat prediction array by sequence
        pred_by_seq = {}
        for seq in range(1, 21):
            inds = test_set.index.seq_num == seq
            pred_by_seq[seq] = pred[inds]
        pred_2d, pred_3d = unstack_mupots_poses(test_set, pred_by_seq)
    else:
        pred_2d, pred_3d = unstack_mupots_poses(test_set, logger.preds)

    print("\nR-PCK R-AUC A-PCK A-AUC")
    for relative in [True, False]:
        # use .get with a 'normal' default for consistency with get_dataset();
        # direct indexing raised KeyError on configs missing 'pose3d_scaling'
        scaling = config.get('pose3d_scaling', 'normal')
        pcks, aucs = mupots_3d.eval_poses(False, relative, 'annot3' if scaling == 'normal' else 'univ_annot3',
                                          pred_2d, pred_3d, keep_matching=True)
        pck = np.mean(list(pcks.values()))
        auc = np.mean(list(aucs.values()))
        print(" %4.1f %4.1f " % (pck, auc), end='')
    print()
if __name__ == "__main__":
    # CLI entry point: evaluate a trained model, optionally with refinement
    cli = argparse.ArgumentParser()
    cli.add_argument('model_name', help="Name of the model (either 'normal' or 'universal')")
    cli.add_argument('-r', '--pose-refine', action='store_true', help='Apply pose-refinement after TPN')
    cli_args = cli.parse_args()
    main(cli_args.model_name, cli_args.pose_refine)
| 4,512 | 34.81746 | 127 | py |
pose_refinement | pose_refinement-master/src/scripts/__init__.py | 0 | 0 | 0 | py | |
pose_refinement | pose_refinement-master/src/scripts/predict.py | import argparse
import cv2
import numpy as np
import os
from databases.datasets import FlippableDataset
from databases.joint_sets import MuPoTSJoints, CocoExJoints
from model.pose_refinement import optimize_poses
from scripts.eval import load_model, LOG_PATH
from training.callbacks import TemporalTestEvaluator
from training.preprocess import SaveableCompose, MeanNormalize3D, get_postprocessor
from util.misc import load, save, ensuredir
from util.pose import extend_hrnet_raw
import shutil
import sys
from scripts import maskrcnn_bboxes, hrnet_predict
def stack_hrnet_raw(img_names, keypoints_raw):
    """
    Gets a JSON output of hrnet and converts it into a numpy array. Only works
    with single pose images.

    :param img_names: an array of image names determining the output order
    :param keypoints_raw: dict image name -> list of hrnet detections
    :return: (keypoints, is_valid) where keypoints is ndarray(len(img_names), 19, 3)
             of 2D keypoints (all-zero rows for frames without a detection) and
             is_valid is ndarray(len(img_names),) of bools
    """
    poses = []
    valid_flags = []
    for frame in img_names:
        if frame in keypoints_raw:
            # pick the highest-scoring detection on this frame
            detections = keypoints_raw[frame]
            best = max(detections, key=lambda d: d['score'])
            pose = np.array(best['keypoints']).reshape((17, 3))
            valid_flags.append(True)
        else:
            pose = np.zeros((17, 3))  # placeholder value
            valid_flags.append(False)
        poses.append(pose)

    keypoints = extend_hrnet_raw(np.stack(poses)).astype('float32')
    is_valid = np.array(valid_flags)
    keypoints[~is_valid] = 0

    return keypoints, is_valid
class VideoTemporalDataset(FlippableDataset):
    """Single-video dataset feeding HR-Net 2D detections to the temporal model.

    2D poses are stored in CocoEx joint order, 3D in MuPoTS order; the 3D
    poses are dummy placeholders since no ground truth exists at prediction
    time. All frames are grouped under one pseudo-sequence named 'vid'.
    """

    def __init__(self, frame_folder, hrnet_keypoint_file, fx, fy, cx=None, cy=None):
        """
        :param frame_folder: folder containing the extracted video frames
        :param hrnet_keypoint_file: json file with HR-Net 2D detections
        :param fx: horizontal focal length of the camera
        :param fy: vertical focal length of the camera
        :param cx: horizontal principal point; image center when None
        :param cy: vertical principal point; image center when None
        """
        self.transform = None
        self.pose2d_jointset = CocoExJoints()
        self.pose3d_jointset = MuPoTSJoints()

        frame_list = sorted(os.listdir(frame_folder))
        N = len(frame_list)

        hrnet_detections = load(hrnet_keypoint_file)
        self.poses2d, self.valid_2d_pred = stack_hrnet_raw(frame_list, hrnet_detections)
        assert len(self.poses2d) == N, "unexpected number of frames"

        # every frame belongs to the single pseudo-sequence 'vid'
        index = [('vid', i) for i in range(N)]
        self.index = np.rec.array(index, dtype=[('seq', 'U4'), ('frame', 'int32')])

        self.poses3d = np.ones((N, self.pose3d_jointset.NUM_JOINTS, 3))  # dummy values

        # load first frame to get width/height
        frame = cv2.imread(os.path.join(frame_folder, frame_list[0]))
        self.width = frame.shape[1]
        # per-frame camera intrinsics (constant across the video)
        self.fx = np.full(N, fx, dtype='float32')
        self.fy = np.full(N, fy, dtype='float32')
        self.cx = np.full(N, cx if cx is not None else frame.shape[1] / 2, dtype='float32')
        self.cy = np.full(N, cy if cy is not None else frame.shape[0] / 2, dtype='float32')

        assert self.poses2d.shape[1] == self.pose2d_jointset.NUM_JOINTS

    def prepare_sample(self, ind):
        """Assemble the raw sample dict for a single index or an index array.

        NOTE(review): the sample keys ('pose2d', 'valid_pose', 'cx', 'width', ...)
        presumably follow the FlippableDataset contract -- confirm against the
        base class before changing them.
        """
        if isinstance(ind, (list, tuple, np.ndarray)):
            width = np.full(len(ind), self.width, dtype='int32')
        else:
            width = self.width
        sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
                  'index': ind, 'valid_pose': self.valid_2d_pred[ind], 'cx': self.cx[ind], 'width': width}

        return sample
def run_tpn(model_name, img_folder, hrnet_keypoint_file, pose_refine, focal_length, cx, cy):
    """Run the temporal pose network on a folder of frames, optionally refining the result."""
    config, model = load_model(model_name)
    dataset = VideoTemporalDataset(img_folder, hrnet_keypoint_file, focal_length, focal_length, cx, cy)

    # restore the train-time preprocessing pipeline
    params_path = os.path.join(LOG_PATH, str(model_name), 'preprocess_params.pkl')
    dataset.transform = SaveableCompose.from_file(params_path, dataset, globals())

    normalizer3d = dataset.transform.transforms[1].normalizer
    assert isinstance(normalizer3d, MeanNormalize3D)

    evaluator = TemporalTestEvaluator(model, dataset, config['model']['loss'], True,
                                      post_process3d=get_postprocessor(config, dataset, normalizer3d))
    evaluator.eval(calculate_scale_free=False, verbose=False)
    poses = evaluator.preds['vid']

    if pose_refine:
        print("Refining poses...")
        refine_config = load('../models/pose_refine_config.json')
        poses = optimize_poses(poses, dataset, refine_config)

    return poses
def main(args):
    """End-to-end prediction: split the video to frames, detect people,
    run HR-Net 2D pose estimation, then the TPN, and save the 3D poses.

    :param args: parsed CLI namespace (vid_path, output, model, pose_refine,
                 focal_length, cx, cy, tmp_folder, tpn_only)
    """
    assert args.output.endswith('.pkl'), "Output file must be a pkl file"

    # Clear up temp folder if it exists
    if os.path.exists(args.tmp_folder) and not args.tpn_only:
        shutil.rmtree(args.tmp_folder)

    frame_dir = os.path.join(args.tmp_folder, 'frames')
    bbox_dir = os.path.join(args.tmp_folder, 'bboxes')
    keypoint_file = os.path.join(args.tmp_folder, 'keypoints.json')

    if not args.tpn_only:
        import subprocess  # local import: only needed for the frame-splitting step

        # split to frames:
        ensuredir(frame_dir)
        # Use an argument list with subprocess instead of an interpolated
        # os.system string: paths containing spaces or shell metacharacters
        # are passed through safely (no shell involved).
        result = subprocess.run(['ffmpeg', '-i', args.vid_path, '-qscale:v', '2',
                                 os.path.join(frame_dir, 'img_%06d.jpg')])
        if result.returncode != 0:
            print("could not split to frames, error code: " + str(result.returncode))
            sys.exit(1)

        # Mask-RCNN person detection
        maskrcnn_bboxes.predict_imgs(frame_dir, bbox_dir)

        # hrnet 2D pose estimation
        hrnet_predict.predict('../hrnet/experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml', frame_dir,
                              bbox_dir, keypoint_file)

    # Run TPN
    print("Running TPN...")
    poses = run_tpn(args.model, frame_dir, keypoint_file, args.pose_refine, args.focal_length, args.cx, args.cy)
    save(args.output, poses)
if __name__ == "__main__":
    # CLI entry point for the full video -> 3D pose pipeline
    cli = argparse.ArgumentParser()
    cli.add_argument('vid_path', help="Path to the video file")
    cli.add_argument('output', help="Output .pkl file")
    cli.add_argument('-m', '--model', default='normal', help="Name of the model (either 'normal' or 'universal')")
    cli.add_argument('-r', '--pose-refine', action='store_true', help='Apply pose-refinement after TPN')
    cli.add_argument('-f', '--focal-length', default=1200, type=float, help='focal length of the camera')
    cli.add_argument('-cx', '--cx', default=None, type=float, help='horizontal centerpoint of camera')
    cli.add_argument('-cy', '--cy', default=None, type=float, help='vertical centerpoint of camera')
    cli.add_argument('--tmp-folder', default='../tmp', help="Path to a folder where temporary results are stored")
    cli.add_argument('--tpn-only', action='store_true',
                     help='Run the TPN only. This requires the bounding boxes and keypoints already generated in the temporary folder.')
    main(cli.parse_args())
| 6,812 | 38.842105 | 132 | py |
pose_refinement | pose_refinement-master/src/scripts/train.py | import argparse
import os
from databases.datasets import Mpi3dTestDataset, Mpi3dTrainDataset, PersonStackedMucoTempDataset, ConcatPoseDataset
from model.videopose import TemporalModel, TemporalModelOptimized1f
from training.callbacks import preds_from_logger, ModelCopyTemporalEvaluator
from training.loaders import ChunkedGenerator
from training.preprocess import *
from training.torch_tools import torch_train
from util.misc import save, ensuredir
def calc_loss(model, batch, config):
    """Compute the 3D pose loss for one batch.

    Supports loss types 'l1' and 'l1_nan'; the latter additionally drops
    samples whose 2D pose contains NaN joints before the forward pass.

    :param model: the pose network, called as ``model(pose2d)``
    :param batch: dict with 'temporal_pose2d', 'pose3d' and 'valid_pose'
    :param config: experiment config (reads config['model']['loss'] and
                   config['ignore_invisible'])
    :return: (loss tensor, dict of scalar loss components)
    :raises Exception: on an unknown loss type
    """
    loss_type = config['model']['loss']
    # Validate up front: previously an unknown loss type crashed with a
    # NameError on the unbound `pose2d` before reaching the raise below.
    if loss_type not in ('l1', 'l1_nan'):
        raise Exception('Unknown pose loss: ' + str(loss_type))

    pose2d = batch['temporal_pose2d']
    gt_3d = batch['pose3d']
    if config['ignore_invisible']:
        pose2d = pose2d[batch['valid_pose']]
        gt_3d = gt_3d[batch['valid_pose']]

    if loss_type == 'l1_nan':
        # drop samples containing NaN joints
        if isinstance(pose2d, torch.Tensor):
            inds = torch.all(torch.all(~torch.isnan(pose2d), dim=-1), dim=-1)
            pose2d = pose2d[inds]
            gt_3d = gt_3d[inds]
        else:
            inds = np.all(~np.isnan(pose2d), axis=(-1, -2))
            pose2d = torch.from_numpy(pose2d[inds])
            gt_3d = torch.from_numpy(gt_3d[inds])

    pose2d = pose2d.to('cuda')
    gt_3d = gt_3d.to('cuda')

    # forward pass; both loss types reduce to a plain L1 after the filtering above
    pred_3d = model(pose2d)
    loss_3d = torch.nn.functional.l1_loss(pred_3d, gt_3d)

    return loss_3d, {'loss_3d': loss_3d.item()}
def run_experiment(output_path, _config):
    """Train a temporal pose model according to ``_config``.

    Saves all artifacts to ``output_path``: config.json, preprocessing params,
    model summary, trained weights and test-set predictions.

    :raises ValueError: if _config['train_data'] names an unknown dataset
    """
    # the directory must exist before the first save() -- previously the
    # config save ran first and failed on a fresh output path
    ensuredir(output_path)
    save(os.path.join(output_path, 'config.json'), _config)

    if _config['train_data'] == 'mpii_train':
        print("Training data is mpii-train")
        train_data = Mpi3dTrainDataset(_config['pose2d_type'], _config['pose3d_scaling'],
                                       _config['cap_25fps'], _config['stride'])

    elif _config['train_data'] == 'mpii+muco':
        print("Training data is mpii-train and muco_temp concatenated")
        mpi_data = Mpi3dTrainDataset(_config['pose2d_type'], _config['pose3d_scaling'],
                                     _config['cap_25fps'], _config['stride'])

        muco_data = PersonStackedMucoTempDataset(_config['pose2d_type'], _config['pose3d_scaling'])

        train_data = ConcatPoseDataset(mpi_data, muco_data)

    elif _config['train_data'].startswith('muco_temp'):
        train_data = PersonStackedMucoTempDataset(_config['pose2d_type'], _config['pose3d_scaling'])
    else:
        # previously fell through and later died with a NameError on train_data
        raise ValueError('Unknown train_data: ' + str(_config['train_data']))

    test_data = Mpi3dTestDataset(_config['pose2d_type'], _config['pose3d_scaling'], eval_frames_only=True)

    if _config['simple_aug']:
        train_data.augment(False)

    # Load the preprocessing steps; the normalizers are fitted on the training set
    train_data.transform = None
    transforms_train = [decode_trfrm(_config['preprocess_2d'], globals())(train_data, cache=False),
                        decode_trfrm(_config['preprocess_3d'], globals())(train_data, cache=False)]

    normalizer2d = transforms_train[0].normalizer
    normalizer3d = transforms_train[1].normalizer

    # the test set reuses the normalisation fitted on the training set
    transforms_test = [decode_trfrm(_config['preprocess_2d'], globals())(test_data, normalizer2d),
                       decode_trfrm(_config['preprocess_3d'], globals())(test_data, normalizer3d)]

    transforms_train.append(RemoveIndex())
    transforms_test.append(RemoveIndex())

    train_data.transform = SaveableCompose(transforms_train)
    test_data.transform = SaveableCompose(transforms_test)

    # save normalisation params
    save(output_path + '/preprocess_params.pkl', train_data.transform.state_dict())

    print("Length of training data:", len(train_data))
    print("Length of test data:", len(test_data))

    # the "optimized 1f" variant is a training-only model; test_model mirrors
    # its architecture for evaluation via weight copying
    model = TemporalModelOptimized1f(train_data[[0]]['pose2d'].shape[-1],
                                     MuPoTSJoints.NUM_JOINTS, _config['model']['filter_widths'],
                                     dropout=_config['model']['dropout'], channels=_config['model']['channels'],
                                     layernorm=_config['model']['layernorm'])
    test_model = TemporalModel(train_data[[0]]['pose2d'].shape[-1],
                               MuPoTSJoints.NUM_JOINTS, _config['model']['filter_widths'],
                               dropout=_config['model']['dropout'], channels=_config['model']['channels'],
                               layernorm=_config['model']['layernorm'])

    model.cuda()
    test_model.cuda()

    save(output_path + '/model_summary.txt', str(model))

    pad = (model.receptive_field() - 1) // 2
    train_loader = ChunkedGenerator(train_data, _config['batch_size'], pad, _config['train_time_flip'], shuffle=True)
    tester = ModelCopyTemporalEvaluator(test_model, test_data, _config['model']['loss'], _config['test_time_flip'],
                                        post_process3d=get_postprocessor(_config, test_data, normalizer3d), prefix='test')

    torch_train(train_loader, model, lambda m, b: calc_loss(m, b, _config), _config, callbacks=[tester])

    torch.save(model.state_dict(), os.path.join(output_path, 'model_params.pkl'))
    save(output_path + '/test_results.pkl', {'index': test_data.index, 'pred': preds_from_logger(test_data, tester),
                                             'pose3d': test_data.poses3d})
def main(output_path):
    """Assemble the default training configuration and launch the experiment."""
    model_params = {
        'loss': 'l1',
        'channels': 1024,
        'dropout': 0.25,
        'filter_widths': [3, 3, 3, 3],
        'layernorm': False,
    }
    scheduler_params = {
        'type': 'multiplicative',
        'multiplier': 0.95,
        'step_size': 1,
    }
    params = {
        'num_epochs': 80,
        'preprocess_2d': 'DepthposeNormalize2D',
        'preprocess_3d': 'SplitToRelativeAbsAndMeanNormalize3D',

        # training
        'optimiser': 'adam',
        'adam_amsgrad': True,
        'learning_rate': 1e-3,
        'sgd_momentum': 0,
        'batch_size': 1024,
        'train_time_flip': True,
        'test_time_flip': True,
        'lr_scheduler': scheduler_params,

        # dataset
        'ignore_invisible': True,
        'train_data': 'mpii+muco',
        'pose2d_type': 'hrnet',
        'pose3d_scaling': 'normal',
        'megadepth_type': 'megadepth_at_hrnet',
        'cap_25fps': True,
        'stride': 2,
        'simple_aug': True,  # augments data by duplicating each frame

        'model': model_params,
    }

    run_experiment(output_path, params)
if __name__ == "__main__":
    # CLI entry point: train a model and save it to the given folder
    cli = argparse.ArgumentParser()
    cli.add_argument('-o', '--output', default='../output', help='folder to save the model to')
    main(cli.parse_args().output)
| 7,059 | 38.222222 | 122 | py |
pose_refinement | pose_refinement-master/src/databases/mupots_3d.py | import glob
import os
import cv2
import numpy as np
from databases.joint_sets import MuPoTSJoints
from util.misc import load, assert_shape
from util.mx_tools import calibration_matrix
MUPO_TS_PATH = '../datasets/MuPoTS'
def _decode_sequence(sequence):
assert isinstance(sequence, (int, np.int32, str)), "sequence must be an int or string"
if isinstance(sequence, (int, np.int32)):
assert 1 <= sequence <= 20, "sequence id must be between 1 and 20"
sequence = "TS" + str(sequence)
return sequence
def get_frame_files(sequence):
    """
    Returns the sorted list of jpg files for a given video sequence.

    :param sequence: either an int between 1 and 20 or a string in the form TSx
    """
    seq_name = _decode_sequence(sequence)
    seq_folder = os.path.join(MUPO_TS_PATH, "MultiPersonTestSet", seq_name)
    assert os.path.isdir(seq_folder), "Could not find " + seq_folder

    return sorted(glob.glob(seq_folder + '/*.jpg'))
def _concat_raw_gt(gt, field, dtype):
""" Concatenates gt annotations coming from the annot.mat file. """
data = np.empty(gt.shape + gt[0, 0][field][0, 0].T.shape, dtype=dtype)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
data[i, j] = gt[i, j][field][0, 0].T
return data
def load_gt_annotations(sequence):
    """
    Loads GT annotations as numpy arrays. This method cleans up the unnecessary indices
    resulting from the `.mat` file loading.

    Returns a dict. Has the following keys:
      - annot2: (nFrames, nPoses, 17, 2), float32
      - annot3: (nFrames, nPoses, 17, 3), float32, the unnormalized coordinates
      - univ_annot3: (nFrames, nPoses, 17, 3), float32
      - isValidFrame: (nFrames, nPoses), bool
      - occlusions: (nFrames, nPoses, 17), bool
    """
    data = load_raw_gt_annotations(sequence)
    occlusions = load_raw_gt_occlusions(sequence)

    # unwrap the per-frame/per-pose Matlab cell array into a dense bool array
    occ_out = np.empty(occlusions.shape + (17,), dtype='bool')
    for i in range(occlusions.shape[0]):
        for j in range(occlusions.shape[1]):
            occ_out[i, j] = occlusions[i, j][0]

    # isValidFrame comes back with a trailing singleton axis; squeeze drops it
    result = {'annot2': _concat_raw_gt(data, 'annot2', 'float32'),
              'annot3': _concat_raw_gt(data, 'annot3', 'float32'),
              'univ_annot3': _concat_raw_gt(data, 'univ_annot3', 'float32'),
              'isValidFrame': _concat_raw_gt(data, 'isValidFrame', 'bool').squeeze(),
              'occlusions': occ_out}
    return result
def load_raw_gt_annotations(sequence):
    """ Loads the GT annotations from the MuPo-TS `annnot.mat` file. """
    seq_name = _decode_sequence(sequence)
    mat_path = os.path.join(MUPO_TS_PATH, "MultiPersonTestSet", seq_name, 'annot.mat')
    return load(mat_path)['annotations']
def load_raw_gt_occlusions(sequence):
    """Loads the raw per-joint occlusion labels from the `occlusion.mat` file."""
    seq_name = _decode_sequence(sequence)
    mat_path = os.path.join(MUPO_TS_PATH, "MultiPersonTestSet", seq_name, 'occlusion.mat')
    return load(mat_path)['occlusion_labels']
def load_2d_predictions(sequence, detector):
    """ Loads precreated 2D pose predictions. These are matched with GT poses. """
    assert detector == 'hrnet'
    seq_name = _decode_sequence(sequence)
    pkl_path = os.path.join(MUPO_TS_PATH, "hrnet_pose2d", seq_name + '.pkl')
    return load(pkl_path, pkl_py2_comp=True)
def all_sequences():
    """ Returns every available sequence's name. """
    root = os.path.join(MUPO_TS_PATH, "MultiPersonTestSet")
    assert os.path.isdir(root), "Could not find " + root
    return sorted(os.listdir(root))
def _sequence2num(sequence):
""" Returns the input sequence as a number. """
if isinstance(sequence, str):
sequence = int(sequence[2])
return sequence
def get_fps(sequence):
    """The first five sequences were recorded at 30 fps, the rest at 60."""
    if _sequence2num(sequence) <= 5:
        return 30
    return 60
def get_image(sequence, frameind):
    """Load one frame of a sequence as an RGB array.

    :param sequence: sequence id
    :param frameind: zero based index of the image
    """
    seq_name = _decode_sequence(sequence)
    frame = cv2.imread(os.path.join(MUPO_TS_PATH, "MultiPersonTestSet", seq_name, 'img_%06d.jpg' % frameind))
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
def image_size(sequence):
    """
    Returns:
        width, height
    """
    if _sequence2num(sequence) <= 5:
        return (2048, 2048)
    return (1920, 1080)
def get_calibration_matrices():
    """Estimate a calibration matrix for every MuPoTS sequence from the
    2D/3D GT correspondences of valid frames."""
    calibs = {}
    for seq in range(1, 21):
        annot = load_gt_annotations(seq)
        # NOTE(review): this selects joints where 'occlusions' is True, i.e.
        # seemingly the *occluded* joints -- elsewhere in this file visibility
        # is ~occlusions. Verify whether this mask is intentional.
        valid = np.logical_and(annot['isValidFrame'][:, :, np.newaxis], annot['occlusions'])
        pose2d = annot['annot2'][valid]
        pose3d = annot['annot3'][valid]
        calibs[seq], reproj, resx, resy, _, _ = calibration_matrix(pose2d, pose3d)
    return calibs
def _match_poses(gt_pose_2d, gt_visibility, pred_pose_2d, pred_visibility, threshold, verbose=False):
"""
Implements ``mpii_multiperson_get_identity_matching.m``.
Parameters:
gt_pose_2d: (nGtPoses,nJoints,2), ground truth 2D poses on the image.
gt_visibility: (nGtPoses,nJoints), True if the given joint is visible in ground-truth.
gt_pose_2d: (nPredPoses,nJoints,2) predicted 2D poses on the image.
pred_visibility: (nPredPoses,nJoints), True if the given joint is visible in predictions.
Returns:
ndarray(nGtPoses), the indices of the matched predicted pose for all ground truth poses. If no
matches were found, the value is -1.
"""
pair_ind = -np.ones(len(gt_pose_2d), dtype='int64') # -1 means no pair, otherwise the pair id
has_gt_pair = np.zeros(len(pred_pose_2d), dtype='bool') # True means the predicted pose is already matched up
if verbose:
print(gt_visibility)
print(pred_visibility)
for i in range(len(gt_pose_2d)):
diff = np.abs(gt_pose_2d[[i]] - pred_pose_2d) # (nPredPose, nJoints, 2)
matches = np.all(diff < threshold, axis=2) # (nPredPose, nJoints)
match_scores = np.sum(matches * (gt_visibility[[i]] & pred_visibility), axis=1)
match_scores[has_gt_pair] = 0 # zero out scores for already matched up pred_poses
if verbose:
print(match_scores)
best_match_ind = np.argmax(match_scores)
if match_scores[best_match_ind] > 0:
pair_ind[i] = best_match_ind
has_gt_pair[best_match_ind] = True
return pair_ind
# Parents of joints in MuPoTS joint set (1-based Matlab indices, hence the -1)
_JOINT_PARENTS = np.array([2, 16, 2, 3, 4, 2, 6, 7, 15, 9, 10, 15, 12, 13, 15, 15, 2]) - 1
# The order in which joints are scaled, from the hip to outer limbs
# (1-based Matlab indices, hence the -1)
_TRAVERSAL_ORDER = np.array([15, 16, 2, 1, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) - 1
def _scale_to_gt(pred_poses, gt_poses):
    """Rescales every bone of ``pred_poses`` to the corresponding gt bone length,
    walking the skeleton from the hip outwards.
    Corresponds to ``mpii_map_to_gt_bone_lengths.m``.
    """
    assert_shape(pred_poses, (None, 17, 3))
    assert_shape(gt_poses, (None, 17, 3))

    result = pred_poses.copy()
    for joint in _TRAVERSAL_ORDER:
        parent = _JOINT_PARENTS[joint]
        target_len = np.linalg.norm(gt_poses[:, joint] - gt_poses[:, parent], axis=1)  # (nPoses,)
        bone = pred_poses[:, joint] - pred_poses[:, parent]  # (nPoses, 3)
        # epsilon guards against zero-length predicted bones
        bone = bone * target_len[:, np.newaxis] / (np.linalg.norm(bone, axis=1, keepdims=True) + 1e-8)
        result[:, joint] = result[:, parent] + bone

    return result
# PCK threshold in millimeters (matches the official MuPoTS evaluation)
PCK_THRESHOLD = 150
# thresholds (mm) over which the AUC is averaged
AUC_THRESHOLDS = np.arange(0, 151, 5)
def eval_poses(matched_only, is_relative, pose3d_type, preds_2d_kpt, preds_3d_kpt, keep_matching=False):
    """
    Calculates the PCK and AUC. This function is equivalent to ``mpii_mupots_multiperson_eval.m``.
    It performs the same gt scaling transformation, uses the same joints for matching and evaluation.

    :param matched_only: True if only detected poses count towards the PCK and AUC
    :param is_relative: True if relative error is calculated
    :param pose3d_type: 'annot3' or 'univ_annot3'
    :param preds_2d_kpt: seq->list(ndarray(nPoses,17,2)), in MuPo-TS joint order. 2D pose predictions.
    :param preds_3d_kpt: seq->list(ndarray(nPoses,17,2)), in MuPo-TS joint order. 3D pose predictions.
    :param keep_matching: if True, the preds_2d_kpt arrays are assumed to be already matched with gt.
                          Otherwise, the matching algorithm in mpii_map_to_gt_bone_lengths is used.
    :return: two dicts from seq name to pck and auc
    """
    # Joints used in original evaluation script
    joints_for_matching = np.arange(1, 14)  # Joints used to match up the 2D poses
    joint_groups = [['Head', [0]], ['Neck', [1]], ['Shou', [2, 5]], ['Elbow', [3, 6]],
                    ['Wrist', [4, 7]], ['Hip', [8, 11]], ['Knee', [9, 12]], ['Ankle', [10, 13]]]
    scored_joints = np.concatenate([x[1] for x in joint_groups])  # Those joints that take part in scoring

    my_matching_inds = []  # per-frame gt->pred matches, kept for debugging only
    all_perjoint_errors = {}  # per-sequence error matrices, kept for debugging only
    pck_by_sequence = {}
    auc_by_sequence = {}
    for seq in range(1, 21):
        gt = load_gt_annotations(seq)
        num_frames = gt['annot2'].shape[0]

        gt_poses = []
        pred_poses = []
        valid_pred = []
        for i in range(num_frames):
            gt_pose_2d = gt['annot2'][i][gt['isValidFrame'][i]]
            gt_pose_3d = gt[pose3d_type][i][gt['isValidFrame'][i]]
            # gt_visibility = ~gt['occlusions'][i][gt['isValidFrame'][i]]
            gt_visibility = np.ones(gt_pose_2d.shape[:2], dtype='bool')

            pred_pose_2d = preds_2d_kpt[seq][i]
            pred_pose_3d = preds_3d_kpt[seq][i]
            pred_visibility = np.ones(pred_pose_2d.shape[:2], dtype='bool')

            # matching between 2D points
            if keep_matching:
                pair_inds = np.arange(gt['annot2'].shape[1])[gt['isValidFrame'][i]]
            else:
                pair_inds = _match_poses(gt_pose_2d[:, joints_for_matching], gt_visibility[:, joints_for_matching],
                                         pred_pose_2d[:, joints_for_matching], pred_visibility[:, joints_for_matching],
                                         40)
            my_matching_inds.append(pair_inds)
            has_pair = pair_inds >= 0

            # Reorder predicted poses to match Gt poses. If a GT pose does not have a pair, it is filled with 1e5
            reordered_pose_3d = 100000 * np.ones_like(gt_pose_3d)  # (nGtPoses, nJoints, 3)
            reordered_pose_3d[has_pair] = pred_pose_3d[pair_inds[has_pair]]  # (nGtPoses, nJoints, 3)

            gt_poses.append(gt_pose_3d)
            pred_poses.append(reordered_pose_3d)
            valid_pred.append(has_pair)

        gt_poses = np.concatenate(gt_poses)
        pred_poses = np.concatenate(pred_poses)
        valid_pred = np.concatenate(valid_pred)

        if is_relative:
            hip_ind = MuPoTSJoints().index_of('hip')
            gt_poses -= gt_poses[:, [hip_ind]]
            pred_poses -= pred_poses[:, [hip_ind]]

        # calculating per joint errors
        pred_poses = _scale_to_gt(pred_poses, gt_poses)
        pred_poses[~valid_pred] = 100000

        errors = np.linalg.norm(gt_poses - pred_poses, axis=2)  # (nGtPoses, nJoints)
        if matched_only:
            errors = errors[valid_pred]

        # was a literal 150 -- use the module-level PCK_THRESHOLD for consistency
        pck_by_sequence[seq] = np.mean(errors[:, scored_joints] < PCK_THRESHOLD) * 100
        auc_by_sequence[seq] = np.mean([np.mean(errors[:, scored_joints] < t) for t in AUC_THRESHOLDS]) * 100
        all_perjoint_errors[seq] = errors

    return pck_by_sequence, auc_by_sequence
| 11,443 | 36.768977 | 119 | py |
pose_refinement | pose_refinement-master/src/databases/joint_sets.py | import numpy as np
from util.misc import assert_shape
# SIDEDNESS
# 0 - right
# 1 - left
# 2 - center
class JointSet:
    """Base class for skeleton definitions; subclasses supply NAMES, NUM_JOINTS
    and (where applicable) JOINTS_LEFT/JOINTS_RIGHT."""

    def index_of(self, joint_name):
        """Return the index of ``joint_name`` within this joint set."""
        matches = np.where(self.NAMES == joint_name)[0]
        assert len(matches) > 0, "No joint called " + joint_name
        return matches[0]

    def flip(self, data):
        """Mirror a pose dataset by swapping the left and right joints."""
        assert_shape(data, ('*', self.NUM_JOINTS, None))
        mirrored = data.copy()
        mirrored[..., self.JOINTS_LEFT + self.JOINTS_RIGHT, :] = mirrored[..., self.JOINTS_RIGHT + self.JOINTS_LEFT, :]
        return mirrored
class MuPoTSJoints(JointSet):
    """17-joint skeleton used by the MuPoTS / MPI-INF-3DHP datasets."""
    NAMES = np.array(["head_top", 'neck', 'right_shoulder', 'right_elbow', 'right_wrist',  # 0-4
                      'left_shoulder', 'left_elbow', 'left_wrist',  # 5-7
                      'right_hip', 'right_knee', 'right_ankle', 'left_hip', 'left_knee', 'left_ankle',  # 8-13
                      'hip', 'spine', 'head/nose'])
    NUM_JOINTS = 17
    # indices selecting the 14 joints shared across joint sets
    TO_COMMON14 = [14, 8, 9, 10, 11, 12, 13, 1, 5, 6, 7, 2, 3, 4]
    LIMBGRAPH = [(10, 9), (9, 8), (8, 14),  # rleg
                 (13, 12), (12, 11), (11, 14),  # llel
                 (0, 16), (16, 1),  # head to thorax
                 (1, 15), (15, 14),  # thorax to hip
                 (4, 3), (3, 2), (2, 1),  # rarm
                 (7, 6), (6, 5), (5, 1),  # larm
                 ]
    # sidedness of each limb in LIMBGRAPH: 0 - right, 1 - left, 2 - center
    SIDEDNESS = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1]
    JOINTS_LEFT = [5, 6, 7, 11, 12, 13]
    JOINTS_RIGHT = [2, 3, 4, 8, 9, 10]

    NAMES.flags.writeable = False
class OpenPoseJoints(JointSet):
    """25-joint OpenPose (BODY_25-style) skeleton."""
    NAMES = np.array(['nose', 'neck', 'right_shoulder', 'right_elbow', 'right_wrist', 'left_shoulder', 'left_elbow',
                      'left_wrist', 'hip', 'right_hip', 'right_knee', 'right_ankle', 'left_hip', 'left_knee', 'left_ankle',
                      'right_eye', 'left_eye', 'right_ear', 'left_ear',
                      'left_bigtoe', 'left_smalltoe', 'left_heel', 'right_bigtoe', 'right_smalltoe', 'right_heel'])
    NUM_JOINTS = 25
    # indices selecting the 14 joints shared across joint sets
    TO_COMMON14 = [8, 9, 10, 11, 12, 13, 14,  # hip, rleg, lleg
                   1, 5, 6, 7, 2, 3, 4]  # neck/thorax, larm, rarm
    # presumably the body joints excluding feet extras -- TODO confirm
    STABLEJOINTS = np.arange(17)

    NAMES.flags.writeable = False
class PanopticJoints(JointSet):
    """19-joint skeleton of the CMU Panoptic dataset."""
    NAMES = np.array(['neck', 'nose', 'hip',
                      'left_shoulder', 'left_elbow', 'left_wrist',  # 3-5
                      'left_hip', 'left_knee', 'left_ankle',  # 6-8
                      'right_shoulder', 'right_elbow', 'right_wrist',  # 9-11
                      'right_hip', 'right_knee', 'right_ankle',  # 12-14
                      'left_eye', 'left_ear', 'right_eye', 'right_ear'])
    NUM_JOINTS = 19
    # indices selecting the 14 joints shared across joint sets
    TO_COMMON14 = [2, 12, 13, 14, 6, 7, 8, 0, 3, 4, 5, 9, 10, 11]  # neck/thorax, larm, rarm
    LIMBGRAPH = [(0, 1), (0, 2),  # spine
                 (0, 3), (3, 4), (4, 5),  # larm
                 (2, 6), (6, 7), (7, 8),  # lleg
                 (0, 9), (9, 10), (10, 11),  # rarm
                 (2, 12), (12, 13), (13, 14),  # rleg
                 (1, 15), (15, 16),  # lface
                 (1, 17), (17, 18)  # rface
                 ]
    # sidedness of each limb in LIMBGRAPH: 0 - right, 1 - left, 2 - center
    SIDEDNESS = [2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]

    NAMES.flags.writeable = False
class CocoExJoints(JointSet):
    """COCO's 17 keypoints extended with 'hip' and 'neck' (indices 17 and 18)."""
    NAMES = np.array(["nose", "left_eye", "right_eye", "left_ear", "right_ear",  # 0-4
                      "left_shoulder", "right_shoulder", "left_elbow", "right_elbow", "left_wrist", "right_wrist",  # 5-10
                      "left_hip", "right_hip", "left_knee", "right_knee", "left_ankle", "right_ankle",  # 11-16
                      "hip", "neck"])
    NUM_JOINTS = 19
    # Selects/reorders joints into the Common14 order: hip, rleg, lleg, neck, larm, rarm.
    TO_COMMON14 = [17, 12, 14, 16, 11, 13, 15, 18, 5, 7, 9, 6, 8, 10]
    # Kinematic edges as joint-index pairs.
    LIMBGRAPH = [(0, 1), (1, 3),  # lface
                 (0, 2), (2, 4),  # rface
                 (0, 18), (18, 17),  # spine
                 (18, 5), (5, 7), (7, 9),  # larm
                 (18, 6), (6, 8), (8, 10),  # rarm
                 (17, 11), (11, 13), (13, 15),  # lleg
                 (17, 12), (12, 14), (14, 16)]  # rleg
    # One entry per LIMBGRAPH edge: 0=right, 1=left, 2=center.
    SIDEDNESS = [1, 1, 0, 0, 2, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0]
    # Joint indices swapped by a horizontal flip.
    JOINTS_LEFT = [1, 3, 5, 7, 9, 11, 13, 15]
    JOINTS_RIGHT = [2, 4, 6, 8, 10, 12, 14, 16]
    NAMES.flags.writeable = False  # guard the shared constant against accidental mutation
class Common14Joints(JointSet):
    """The shared 14-joint subset every other joint set can be mapped to via TO_COMMON14."""
    NAMES = np.array(['hip', 'right_hip', 'right_knee', 'right_ankle', 'left_hip', 'left_knee', 'left_ankle', 'neck',
                      'left_shoulder', 'left_elbow', 'left_wrist', 'right_shoulder', 'right_elbow', 'right_wrist'])
    NUM_JOINTS = 14
    # Identity mapping: this *is* the Common14 order.
    TO_COMMON14 = np.arange(14)
    # Kinematic edges as joint-index pairs.
    LIMBGRAPH = [(0, 1), (1, 2), (2, 3),  # rleg
                 (0, 4), (4, 5), (5, 6),  # lleg
                 (0, 7),  # spine
                 (7, 8), (8, 9), (9, 10),  # larm
                 (7, 11), (11, 12), (12, 13)]  # rarm
    # One entry per LIMBGRAPH edge: 0=right, 1=left, 2=center.
    SIDEDNESS = [0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 0, 0, 0]
    NAMES.flags.writeable = False  # guard the shared constant against accidental mutation
| 4,916 | 39.636364 | 123 | py |
pose_refinement | pose_refinement-master/src/databases/muco_temp.py | import os
import cv2
from util.misc import load
MUCO_TEMP_PATH = '../datasets/MucoTemp'
def get_frame(cam, vid_id, frame_ind, rgb=True):
    """
    Loads a single frame of a MuCo-Temp video.

    :param rgb: if True, OpenCV's BGR image is converted to RGB before returning
    """
    frame_path = os.path.join(MUCO_TEMP_PATH, 'frames/cam_%d/vid_%d' % (cam, vid_id), 'img_%04d.jpg' % frame_ind)
    frame = cv2.imread(frame_path)
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if rgb else frame
def get_metadata():
    """ Per-video metadata: which original videos and starting frames each MuCo-Temp video was built from. """
    meta_path = os.path.join(MUCO_TEMP_PATH, 'sequence_meta.pkl')
    return load(meta_path)
def load_gt(cam):
    """ Ground-truth annotations for all videos recorded by camera ``cam``. """
    gt_path = os.path.join(MUCO_TEMP_PATH, 'frames', 'cam_%d' % cam, 'gt.pkl')
    return load(gt_path)
def load_hrnet(cam, vid):
    """ HR-Net 2D keypoint predictions for one camera/video pair. """
    kp_path = os.path.join(MUCO_TEMP_PATH, 'hrnet_keypoints', 'cam_%d' % cam,
                           'gt_match_posedist_80', 'vid_%d.pkl' % vid)
    return load(kp_path)
| 830 | 27.655172 | 130 | py |
pose_refinement | pose_refinement-master/src/databases/datasets.py | import os
import h5py
import numpy as np
from torch.utils.data import Dataset
from databases import mupots_3d, mpii_3dhp, muco_temp
from databases.joint_sets import CocoExJoints, OpenPoseJoints, MuPoTSJoints
class PoseDataset(Dataset):
    """ Subclasses should have the attributes poses2d/3d, pred_cdepths, pose[2|3]d_jointset defined."""

    def filter_dataset(self, inds):
        """
        Keeps only the records selected by ``inds`` in every per-record array.

        :param inds: anything that can be used for numpy masking
        """
        # These two fields exist only on some subclasses.
        for optional in ('coord_depths', 'width'):
            if hasattr(self, optional):
                setattr(self, optional, getattr(self, optional)[inds])

        # Fields every subclass is expected to define.
        for mandatory in ('pred_cdepths', 'poses2d', 'poses3d',
                          'index', 'fx', 'fy', 'cx', 'cy'):
            setattr(self, mandatory, getattr(self, mandatory)[inds])

        # gt_depth_2d is not implemented for compressed videos and is ignored
        # for CoordDepthDataset anyway, so it is dropped rather than filtered.
        self.gt_depth_2d = None
class AugmentMixin:
    def augment(self, scale_by_dist, scales=None):
        """
        Augments the data in a pose dataset. It simulates moving the poses
        closer and further away from the camera. The method takes the dataset D, applies a transformation T,
        and concatenates the transformed data to the original data.

        :param scale_by_dist: If true, during augmentation it scales values with l2 distance from camera,
                              otherwise with z coordinate (depth).
        :param scales: if defined, values in this array used for scaling instead of random values
        """
        # NOTE(review): scale_by_dist is not referenced anywhere in this body —
        # scaling is always applied to the root's coordinates directly; confirm intended.
        assert isinstance(self.pose3d_jointset, MuPoTSJoints), "only implemented for MupoTS joints"
        orig_size = len(self.poses2d)
        root_ind = MuPoTSJoints().index_of('hip')

        # Calculating minimum scale to avoid joints behind camera
        if scales is None:
            limb_vec = self.poses3d[:, :, 2] - self.poses3d[:, [root_ind], 2]
            min_scale = np.nanmax(-limb_vec / self.poses3d[:, [root_ind], 2], axis=1)

            # Random scales around 1; out-of-range draws fall back to 1 (no augmentation),
            # but never below the geometric minimum computed above.
            scales = np.random.normal(1, 0.25, orig_size)
            scales[scales < 0.6] = 1
            scales = np.maximum(scales, min_scale + 1e-5)
            scales[scales > 1.5] = 1
            scales = scales.reshape((-1, 1))
        else:
            assert scales.ndim == 2, "scales is expected to be a column vector"
        # Keep a copy for later inspection by callers.
        self.scales = scales.copy()

        # Duplicate all the training data, the first half is the original unchanged,
        # the second half is augmented
        for field in ['poses2d', 'poses3d', 'pred_cdepths', 'fx', 'fy', 'cx', 'cy', 'width', 'valid_2d_pred']:
            if hasattr(self, field):
                data = self.__getattribute__(field)
                self.__setattr__(field, np.concatenate([data, data.copy()]))
        if hasattr(self, 'index'):
            self.index = np.concatenate([self.index, self.index.copy()])

        # Calculate the new 3D coordinates of the poses:
        # translate each pose so its root moves to scale * original root.
        orig_roots = np.expand_dims(self.poses3d[orig_size:, root_ind, :].copy(), 1)  # (nPoses, 1, 3)
        new_roots = orig_roots * np.expand_dims(scales, 1)
        self.poses3d[orig_size:, :, :] = self.poses3d[orig_size:, :, :] - orig_roots + new_roots

        # 2D poses shrink/grow around the 2D root by the inverse scale
        # (moving away from the camera makes the projected pose smaller).
        pose2d_root_ind = self.pose2d_jointset.index_of('hip')
        self.poses2d[orig_size:, :, :2] = (self.poses2d[orig_size:, :, :2]
                                           - self.poses2d[orig_size:, [pose2d_root_ind], :2]) / scales[:, :, None] \
                                          + self.poses2d[orig_size:, [pose2d_root_ind], :2]

        assert np.all((self.poses3d[:, :, 2] >= 0) | np.isnan(self.poses3d[:, :, 2])), "Joint behind camera"
class TemporalAugmentMixin(AugmentMixin):
    def augment(self, scale_by_dist, scales=None):
        """
        Same augmentation as AugmentMixin.augment, but all poses belonging to one
        sequence receive a single common scale, and the duplicated (augmented)
        half gets new sequence ids (original id + 'A') so the halves stay
        distinguishable.
        """
        orig_len = len(self.poses2d)
        if scales is None:
            # creating scales such that poses on a single frame have the same scale
            root_ind = self.pose3d_jointset.index_of('hip')
            limb_vec = self.poses3d[:, :, 2] - self.poses3d[:, [root_ind], 2]
            min_scales = np.nanmax(-limb_vec / self.poses3d[:, [root_ind], 2], axis=1)

            scales = np.ones(len(self.poses2d), dtype='float32')
            seqs = sorted(np.unique(self.index.seq))
            for seq in seqs:
                inds = self.index.seq == seq
                # print(np.sum(inds), seq, self.index.seq)
                # One random scale per sequence, clamped to [0.6, 1.5] and kept
                # above the per-sequence geometric minimum.
                min_scale = np.max(min_scales[inds])
                scale = np.random.normal(1, 0.2)
                scale = max(scale, 0.6)
                scale = max(scale, min_scale + 1e-5)
                scale = min(scale, 1.5)
                scales[inds] = scale
            scales = scales[:, np.newaxis]

        super().augment(scale_by_dist, scales)

        # Rename sequences of the augmented half so they do not clash with the originals.
        # NOTE(review): self.index.seq has a fixed-width unicode dtype; appending 'A'
        # silently truncates if the id is already at maximum width — confirm widths.
        self.index = np.rec.array(self.index)
        for i in range(orig_len, 2 * orig_len):
            self.index.seq[i] = self.index.seq[i] + 'A'
class FlippableDataset(PoseDataset):
    """Pose dataset that can serve horizontally mirrored samples on demand."""

    def __len__(self):
        return self.poses2d.shape[0]

    def get_samples(self, ind, flip):
        """
        :param ind: indices of the elements to extract
        :param flip: true if elements should be flipped all of them
        """
        sample = self.prepare_sample(ind)

        is_mask = isinstance(flip, np.ndarray)
        if is_mask or flip:
            if not is_mask:
                # Broadcast the scalar flag to a per-sample boolean mask.
                flip = np.full(len(ind), flip, dtype='bool')

            # Mirror the 2D poses around the vertical image axis, then swap
            # left/right joints so the labels stay semantically correct.
            pose2d = sample['pose2d'].copy()
            mirrored_x = np.expand_dims(sample['width'][flip], 1) - pose2d[flip, ..., 0]
            pose2d[flip, ..., 0] = mirrored_x
            pose2d[flip] = self.pose2d_jointset.flip(pose2d[flip])
            sample['pose2d'] = pose2d

            # In camera space mirroring is just negating the x coordinate.
            pose3d = sample['pose3d'].copy()
            pose3d[flip, ..., 0] = -pose3d[flip, ..., 0]
            pose3d[flip] = self.pose3d_jointset.flip(pose3d[flip])
            sample['pose3d'] = pose3d

            # The principal point moves as well when the image is mirrored.
            cx = sample['cx'].copy()
            cx[flip] = sample['width'][flip] - cx[flip]
            sample['cx'] = cx

        if self.transform:
            sample = self.transform(sample)
        return sample

    def __getitem__(self, ind):
        return self.get_samples(ind, False)
class ConcatPoseDataset(FlippableDataset, TemporalAugmentMixin):
    """Concatenation of two pose datasets that share the same joint sets."""

    def __init__(self, data1, data2):
        self.data1 = data1
        self.data2 = data2
        # Stack all per-record arrays of the two datasets back to back.
        fields = ['poses2d', 'poses3d', 'fx', 'fy', 'cx', 'cy', 'valid_2d_pred']
        for field in fields:
            field1 = data1.__getattribute__(field)
            field2 = data2.__getattribute__(field)
            self.__setattr__(field, np.concatenate([field1, field2]))

        # The merged index keeps only the sequence id field.
        seqs = np.concatenate([data1.index.seq, data2.index.seq])
        self.index = np.recarray(len(seqs), [('seq', seqs.dtype)])
        self.index.seq = seqs

        # The two datasets must use identical joint definitions to be mergeable.
        assert type(data1.pose2d_jointset) == type(data2.pose2d_jointset)
        assert type(data1.pose3d_jointset) == type(data2.pose3d_jointset)
        self.pose2d_jointset = data1.pose2d_jointset
        self.pose3d_jointset = data1.pose3d_jointset

        self.transform = None

    def prepare_sample(self, ind):
        """Collects one (or a batch of) sample dict(s); image width is hardcoded to 2048."""
        if isinstance(ind, (list, tuple, np.ndarray)):
            width = np.full(len(ind), 2048, dtype='int32')
        else:
            width = 2048
        sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
                  'index': ind, 'valid_pose': self.valid_2d_pred[ind], 'cx': self.cx[ind], 'width': width}
        return sample
def _column_stack(data):
""" Columnwise stacks an ndarray"""
return data.reshape((-1,) + data.shape[2:], order='F').copy()
class PersonStackedMuPoTsDataset(FlippableDataset):
    def __init__(self, pose2d_type, pose3d_scaling, pose_validity='detected_only', hip_threshold=-1):
        """
        Loads MuPoTS dataset but only those images where at least one person was detected. Each person on a frame
        is loaded separately.

        :param pose_validity: one of 'all', 'detected_only', 'valid_only'; specifies which poses are marked valid
                              all - all of them; valid_only - those that are valid according to the GT annotations
                              detected_only - those that were successfuly detected by the 2D algon and also valid
        :param hip_threshold: only those poses are loaded, where the score of the hip is larger than this value
        :param filter_incorrect_match: MuPoTS's pose matching script has some erroneous matching. If filter_incorrect_match is True,
                                       these are not loaded.
        """
        assert pose_validity in ['all', 'detected_only', 'valid_only']
        assert pose3d_scaling in ['univ', 'normal']
        self.pose2d_jointset = PersonStackedMuPoTsDataset.get_jointset(pose2d_type)
        self.pose3d_jointset = MuPoTSJoints()
        self.pose3d_scaling = pose3d_scaling

        pred2d_root_ind = self.pose2d_jointset.index_of('hip')

        # Accumulators, one entry appended per sequence, concatenated at the end.
        poses2d = []
        poses3d = []
        joint3d_visible = []
        all_good_poses = []
        valid_annotations = []
        width = []
        index = []
        for seq in range(1, 21):
            img_width, img_height = mupots_3d.image_size(seq)

            gt = mupots_3d.load_gt_annotations(seq)
            pred2d = mupots_3d.load_2d_predictions(seq, pose2d_type)

            pose2d = pred2d['pose']
            pose3d = gt['annot3' if pose3d_scaling == 'normal' else 'univ_annot3']
            visibility = ~gt['occlusions']

            # Decide which (frame, person) slots count as usable poses.
            if pose_validity == 'all':
                good_poses = np.full(pose3d.shape[:2], True, dtype='bool')
            elif pose_validity == 'valid_only':
                good_poses = gt['isValidFrame'].squeeze()
            elif pose_validity == 'detected_only':
                good_poses = gt['isValidFrame'].squeeze()
                good_poses = np.logical_and(good_poses, pred2d['valid_pose'])
                good_poses = np.logical_and(good_poses, pose2d[:, :, pred2d_root_ind, 2] > hip_threshold)
            else:
                raise NotImplementedError("Unknown pose_validity value:" + pose_validity)

            # Per-slot frame and person indices, used to build the index records.
            orig_frame = np.tile(np.arange(len(good_poses)).reshape(-1, 1), (1, good_poses.shape[1]))
            orig_pose = np.tile(np.arange(good_poses.shape[1]).reshape(1, -1), (good_poses.shape[0], 1))

            assert pose2d.shape[:2] == good_poses.shape  # (nFrames, nPeople)
            assert pose3d.shape[:2] == good_poses.shape
            assert orig_frame.shape == good_poses.shape
            assert orig_pose.shape == good_poses.shape
            assert pose2d.shape[2:] == (self.pose2d_jointset.NUM_JOINTS, 3)
            assert pose3d.shape[2:] == (17, 3)
            assert visibility.shape[2] == 17
            assert good_poses.ndim == 2

            # Stack persons one after another ("person-stacked" layout).
            orig_frame = _column_stack(orig_frame)
            orig_pose = _column_stack(orig_pose)
            index.extend([('%d/%d' % (seq, orig_pose[i]), seq, orig_frame[i], orig_pose[i]) for i in range(len(orig_frame))])

            poses2d.append(_column_stack(pose2d))
            poses3d.append(_column_stack(pose3d))
            joint3d_visible.append(_column_stack(visibility))
            all_good_poses.append(_column_stack(good_poses))
            valid_annotations.append(_column_stack(gt['isValidFrame']))
            width.extend([img_width] * len(orig_frame))

        self.poses2d = np.concatenate(poses2d).astype('float32')
        self.poses3d = np.concatenate(poses3d).astype('float32')
        self.joint3d_visible = np.concatenate(joint3d_visible)
        self.good_poses = np.concatenate(all_good_poses)
        self.valid_annotations = np.concatenate(valid_annotations)
        self.width = np.array(width)
        self.index = np.rec.array(index, dtype=[('seq', 'U5'), ('seq_num', 'int32'), ('frame', 'int32'), ('pose', 'int32')])

        assert self.valid_annotations.shape == self.good_poses.shape
        assert len(self.valid_annotations) == len(self.poses2d)

        # Load calibration matrices
        N = len(self.poses2d)
        self.fx = np.zeros(N, dtype='float32')
        self.fy = np.zeros(N, dtype='float32')
        self.cx = np.zeros(N, dtype='float32')
        self.cy = np.zeros(N, dtype='float32')

        mupots_calibs = mupots_3d.get_calibration_matrices()
        for seq in range(1, 21):
            inds = (self.index.seq_num == seq)
            self.fx[inds] = mupots_calibs[seq][0, 0]
            self.fy[inds] = mupots_calibs[seq][1, 1]
            self.cx[inds] = mupots_calibs[seq][0, 2]
            self.cy[inds] = mupots_calibs[seq][1, 2]

        assert np.all(self.fx > 0), "Some fields were not filled"
        assert np.all(self.fy > 0), "Some fields were not filled"
        assert np.all(np.abs(self.cx) > 0), "Some fields were not filled"
        assert np.all(np.abs(self.cy) > 0), "Some fields were not filled"
        self.transform = None

    @staticmethod
    def get_jointset(pose2d_type):
        """Maps a 2D pose estimator name to the joint set its output uses."""
        if pose2d_type == 'openpose':
            return OpenPoseJoints()
        elif pose2d_type == 'hrnet':
            return CocoExJoints()
        else:
            raise Exception("Unknown 2D pose type: " + pose2d_type)

    def prepare_sample(self, ind):
        """Collects one (or a batch of) sample dict(s) for the given indices."""
        sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
                  'index': ind, 'valid_pose': self.good_poses[ind], 'cx': self.cx[ind], 'width': self.width[ind]}
        return sample
class Mpi3dTestDataset(FlippableDataset):
    """The MPI-INF-3DHP test set (6 sequences), one sample per frame."""

    def __init__(self, pose2d_type, pose3d_scaling, eval_frames_only=False):
        """
        :param eval_frames_only: if True, only frames marked valid by the dataset
                                 are included in ``good_poses``
        """
        assert pose2d_type == 'hrnet', "Only hrnet 2d is implemented"
        assert pose3d_scaling in ['normal', 'univ'], \
            "Unexpected pose3d scaling type: " + str(pose3d_scaling)
        self.transform = None
        self.eval_frames_only = eval_frames_only

        pose3d_key = 'annot3' if pose3d_scaling == 'normal' else 'univ_annot3'
        poses2d = []
        poses3d = []
        valid_2d_pred = []  # True if HR-net found a pose
        valid_frame = []  # True if MPI-INF-3DHP marked the frame as valid
        fx = []
        fy = []
        cx = []
        cy = []
        width = []
        index = []
        for seq in range(1, 7):
            # NOTE(review): the h5py.File handle is never closed explicitly.
            gt = h5py.File(os.path.join(mpii_3dhp.MPII_3DHP_PATH,
                                        'mpi_inf_3dhp_test_set', 'TS%d' % seq, 'annot_data.mat'), 'r')
            poses3d.append(gt[pose3d_key][:, 0])
            valid_frame.append(gt['valid_frame'][()] == 1)
            num_frames = len(poses3d[-1])  # The annotations are shorter than the number of images

            tmp = mpii_3dhp.test_poses_hrnet(seq)
            poses2d.append(tmp['poses'])
            valid_2d_pred.append(tmp['is_valid'])
            assert len(poses3d[-1]) == len(poses2d[-1]), "Gt and predicted frames are not aligned, seq:" + str(seq)

            index.extend([(seq, i) for i in range(num_frames)])

            # One intrinsic matrix per sequence, replicated for every frame.
            calibration_mx = mpii_3dhp.get_test_calib(seq)
            fx.extend([calibration_mx[0, 0]] * num_frames)
            fy.extend([calibration_mx[1, 1]] * num_frames)
            cx.extend([calibration_mx[0, 2]] * num_frames)
            cy.extend([calibration_mx[1, 2]] * num_frames)
            # TS1-TS4 are 2048px wide, TS5-TS6 are 1920px wide.
            width.extend([2048 if seq < 5 else 1920] * num_frames)

        self.pose2d_jointset = CocoExJoints()
        self.pose3d_jointset = MuPoTSJoints()

        self.poses2d = np.concatenate(poses2d)
        self.poses3d = np.concatenate(poses3d)
        self.valid_2d_pred = np.concatenate(valid_2d_pred)
        valid_frame = np.concatenate(valid_frame)
        assert valid_frame.shape[1] == 1, valid_frame.shape
        valid_frame = valid_frame[:, 0]
        self.index = np.rec.array(index, dtype=[('seq', 'int32'), ('frame', 'int32')])

        self.fx = np.array(fx, dtype='float32')
        self.fy = np.array(fy, dtype='float32')
        self.cx = np.array(cx, dtype='float32')
        self.cy = np.array(cy, dtype='float32')
        self.width = np.array(width, dtype='int32')
        assert len(self.poses2d) == len(self.index), len(self.index)

        # keep only those frame where a pose was detected
        good_poses = self.valid_2d_pred.copy()
        if eval_frames_only:
            good_poses = good_poses & valid_frame

        self.good_poses = good_poses

        assert len(self.poses2d) == len(self.poses3d)
        assert len(self.poses2d) == len(self.index), len(self.index)
        assert len(self.poses2d) == len(self.valid_2d_pred), len(self.valid_2d_pred)
        assert len(self.poses2d) == len(self.fx), len(self.fx)
        assert len(self.poses2d) == len(self.fy), len(self.fy)
        assert len(self.poses2d) == len(self.cx), len(self.cx)
        assert len(self.poses2d) == len(self.cy), len(self.cy)
        assert len(self.poses2d) == len(self.width), len(self.width)
        assert len(self.poses2d) == len(self.good_poses), len(self.good_poses)

    def prepare_sample(self, ind):
        """Collects one (or a batch of) sample dict(s) for the given indices."""
        sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
                  'index': ind, 'valid_pose': self.good_poses[ind], 'cx': self.cx[ind], 'width': self.width[ind]}
        return sample
class Mpi3dTrainDataset(FlippableDataset, TemporalAugmentMixin):
    """The MPI-INF-3DHP training set (8 subjects x 2 sequences x 11 cameras)."""

    def __init__(self, pose2d_type, pose3d_scaling, cap_at_25fps, stride=1):
        """
        :param cap_at_25fps: if True, 50 fps recordings are subsampled to 25 fps
        :param stride: additional frame subsampling applied after the fps cap
        """
        assert pose2d_type == 'hrnet', "Only hrnet 2d is implemented"
        assert pose3d_scaling in ['normal', 'univ'], \
            "Unexpected pose3d scaling type: " + str(pose3d_scaling)
        self.transform = None

        pose3d_key = 'annot3' if pose3d_scaling == 'normal' else 'univ_annot3'
        poses2d = []
        poses3d = []
        valid_2d_pred = []  # True if HR-net found a pose
        fx = []
        fy = []
        cx = []
        cy = []
        index = []
        sequences = []

        calibs = mpii_3dhp.get_calibration_matrices()
        for sub in range(1, 9):
            for seq in range(1, 3):
                gt = mpii_3dhp.train_ground_truth(sub, seq)
                for cam in range(11):
                    # In S3/Seq2 cam2 there are some frame between 9400-9900 where the pose is
                    # behind the camera/nearly in the camera plane. This breaks training.
                    # For simplicity, ignore the whole set but ignoring frames 9400-9900
                    # would also work
                    if seq == 2 and sub == 3 and cam == 2:
                        continue

                    # Find indices that are selected for the dataset
                    inds = np.arange(len(gt[pose3d_key][cam]))
                    if cap_at_25fps and mpii_3dhp.get_train_fps(sub, seq) == 50:
                        inds = inds[::2]
                    inds = inds[::stride]
                    num_frames = len(inds)

                    poses3d.append(gt[pose3d_key][cam][inds])

                    tmp = mpii_3dhp.train_poses_hrnet(sub, seq, cam)
                    poses2d.append(tmp['poses'][inds])
                    valid_2d_pred.append(tmp['is_valid'][inds])
                    assert len(poses3d[-1]) == len(poses2d[-1]
                                                   ), "Gt and predicted frames are not aligned, seq:" + str(seq)

                    seq_name = 'S%d/Seq%d/%d' % (sub, seq, cam)
                    sequences.append(seq_name)
                    index.extend([(seq_name, sub, seq, cam, i) for i in inds])

                    # One intrinsic matrix per (subject, seq, camera), replicated per frame.
                    calibration_mx = calibs[(sub, seq, cam)]
                    fx.extend([calibration_mx[0, 0]] * num_frames)
                    fy.extend([calibration_mx[1, 1]] * num_frames)
                    cx.extend([calibration_mx[0, 2]] * num_frames)
                    cy.extend([calibration_mx[1, 2]] * num_frames)

        self.pose2d_jointset = CocoExJoints()
        self.pose3d_jointset = MuPoTSJoints()

        self.poses2d = np.concatenate(poses2d)
        self.poses3d = np.concatenate(poses3d)
        self.valid_2d_pred = np.concatenate(valid_2d_pred)
        self.index = np.rec.array(index, dtype=[('seq', 'U12'), ('sub', 'int32'), ('subseq', 'int32'),
                                                ('cam', 'int32'), ('frame', 'int32')])

        self.fx = np.array(fx, dtype='float32')
        self.fy = np.array(fy, dtype='float32')
        self.cx = np.array(cx, dtype='float32')
        self.cy = np.array(cy, dtype='float32')
        self.sequences = sorted(sequences)
        assert len(self.poses2d) == len(self.index), len(self.index)

        assert len(self.poses2d) == len(self.poses3d)
        assert len(self.poses2d) == len(self.index), len(self.index)
        assert len(self.poses2d) == len(self.valid_2d_pred), len(self.valid_2d_pred)
        assert len(self.poses2d) == len(self.fx), len(self.fx)
        assert len(self.poses2d) == len(self.fy), len(self.fy)
        assert len(self.poses2d) == len(self.cx), len(self.cx)
        assert len(self.poses2d) == len(self.cy), len(self.cy)

    def filter_dataset(self, inds):
        # Keep the cached sequence list consistent with the filtered index.
        super().filter_dataset(inds)
        self.sequences = sorted(np.unique(self.index.seq))

    def prepare_sample(self, ind):
        """Collects one (or a batch of) sample dict(s); image width is hardcoded to 2048."""
        if isinstance(ind, (list, tuple, np.ndarray)):
            width = np.full(len(ind), 2048, dtype='int32')
        else:
            width = 2048
        sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
                  'index': ind, 'valid_pose': self.valid_2d_pred[ind], 'cx': self.cx[ind], 'width': width}
        return sample
class PersonStackedMucoTempDataset(FlippableDataset, TemporalAugmentMixin):
    """ This dataset contains Muco-Temp poses, poses on the same frame are separated. """

    def __init__(self, pose2d_type, pose3d_scaling):
        assert pose2d_type == 'hrnet', "only hrnet is implemented"
        assert pose3d_scaling in ['univ', 'normal']
        self.transform = None
        self.pose2d_jointset = PersonStackedMuPoTsDataset.get_jointset(pose2d_type)
        self.pose3d_jointset = MuPoTSJoints()

        pose3d_key = 'annot3' if pose3d_scaling == 'normal' else 'univ_annot3'
        poses2d = []
        poses3d = []
        valid_2d_pred = []  # True if HR-net found a pose
        fx = []
        fy = []
        cx = []
        cy = []
        index = []

        calibs = mpii_3dhp.get_calibration_matrices()
        meta_data = muco_temp.get_metadata()
        for cam in range(11):
            gt = muco_temp.load_gt(cam)
            for vid in range(7):
                orig_shape = gt[vid][pose3d_key].shape  # (nFrames, nPoses, nJoints, 3)
                # Stack persons one after another ("person-stacked" layout).
                poses3d.append(_column_stack(gt[vid][pose3d_key]))

                kp = muco_temp.load_hrnet(cam, vid)
                poses2d.append(_column_stack(kp['poses']))
                valid_2d_pred.append(_column_stack(kp['is_valid']))
                assert len(poses3d[-1]) == len(poses2d[-1]), \
                    "Gt and predicted frames are not aligned, cam:" + str(cam)

                # Per-slot frame and person indices, used to build the index records.
                orig_frame = np.tile(np.arange(orig_shape[0]).reshape(-1, 1), (1, orig_shape[1]))
                orig_pose = np.tile(np.arange(orig_shape[1]).reshape(1, -1), (orig_shape[0], 1))
                orig_frame = _column_stack(orig_frame)  # (nFrames*nPoses,)
                orig_pose = _column_stack(orig_pose)
                index.extend([('%d/%d/%d' % (cam, vid, orig_pose[i]), cam, vid, orig_frame[i], orig_pose[i])
                              for i in range(len(orig_frame))])

                # Each composited person comes from a different MPI-3DHP recording,
                # so the calibration is looked up per person.
                for pose_ind in range(orig_shape[1]):
                    sub, seq, _ = meta_data[cam][vid][pose_ind]
                    calibration_mx = calibs[(sub, seq, cam)]
                    fx.extend([calibration_mx[0, 0]] * orig_shape[0])
                    fy.extend([calibration_mx[1, 1]] * orig_shape[0])
                    cx.extend([calibration_mx[0, 2]] * orig_shape[0])
                    cy.extend([calibration_mx[1, 2]] * orig_shape[0])

        self.poses2d = np.concatenate(poses2d)
        self.poses3d = np.concatenate(poses3d)
        self.valid_2d_pred = np.concatenate(valid_2d_pred)
        self.index = np.rec.array(index, dtype=[('seq', 'U12'), ('cam', 'int32'), ('vid', 'int32'),
                                                ('frame', 'int32'), ('pose', 'int32')])

        self.fx = np.array(fx, dtype='float32')
        self.fy = np.array(fy, dtype='float32')
        self.cx = np.array(cx, dtype='float32')
        self.cy = np.array(cy, dtype='float32')

        assert len(self.poses2d) == len(self.index), len(self.index)
        assert len(self.poses2d) == len(self.poses3d)
        assert len(self.poses2d) == len(self.index), len(self.index)
        assert len(self.poses2d) == len(self.valid_2d_pred), len(self.valid_2d_pred)
        assert len(self.poses2d) == len(self.fx), len(self.fx)
        assert len(self.poses2d) == len(self.fy), len(self.fy)
        assert len(self.poses2d) == len(self.cx), len(self.cx)
        assert len(self.poses2d) == len(self.cy), len(self.cy)

    def prepare_sample(self, ind):
        """Collects one (or a batch of) sample dict(s); image width is hardcoded to 2048."""
        if isinstance(ind, (list, tuple, np.ndarray)):
            width = np.full(len(ind), 2048, dtype='int32')
        else:
            width = 2048
        sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
                  'index': ind, 'valid_pose': self.valid_2d_pred[ind], 'cx': self.cx[ind], 'width': width}
        return sample
def pose_grid_from_index(keys):
    """
    From an array of frame ids returns the id of the frame and the pose in that frame.
    These can be used to reshape arrays containing stacked poses.

    Parameters:
        keys: ids of frames. It is expected that poses in the same frame are next to each other (in other words,
              if keys[i]==keys[j], then for all i<=k<=j keys[k]==keys[i])
    """
    # An entry starts a new frame when its key differs from its predecessor;
    # the very first entry always does.
    new_frame = np.concatenate([[True], keys[1:] != keys[:-1]])
    positions = np.arange(len(new_frame))

    # For each record, the position of the first record of its frame: place the
    # start position where a frame begins, -1 elsewhere, then carry the last
    # seen start forward with a running maximum.
    starts = np.where(new_frame, positions, -1)
    starts = np.maximum.accumulate(starts)

    # Offset within the frame is the distance from the frame's first record.
    return keys, positions - starts
def reshape_posearray(frame_ind, pose_ind, array):
    """
    Reshapes an array aligned with a stacked pose array into one grouped by
    frame, shape (nFrames, 6, ...). Unused slots are NaN for float32/float64
    arrays and zero-initialized for all other dtypes.

    NOTE: uses hardcoded number of poses, as in this project max people on a frame is 6.
    """
    assert np.max(pose_ind) < 6, "In this code the maximum number of poses per frame is hardcoded to 6"

    num_frames = np.max(frame_ind) + 1
    result = np.zeros((num_frames, 6) + array.shape[1:], dtype=array.dtype)
    if array.dtype in ('float32', 'float64'):
        # NaN out the grid so unfilled slots are recognizable.
        result = result * np.nan
    elif isinstance(array, np.recarray):
        # Preserve the recarray interface on the output.
        result = np.rec.array(result)

    result[frame_ind, pose_ind] = array
    return result
| 27,399 | 42.149606 | 132 | py |
pose_refinement | pose_refinement-master/src/databases/mpii_3dhp.py | import os
import cv2
import numpy as np
from databases.joint_sets import MuPoTSJoints
from util.misc import load
MPII_3DHP_PATH = '../datasets/Mpi3DHP'
def test_frames(seq):
    """
    Sorted image file names of an MPI-INF-3DHP test sequence.

    The last frame of TS3 and the last two frames of TS4 have no annotations
    and are therefore dropped.
    """
    seq_dir = os.path.join(MPII_3DHP_PATH, 'mpi_inf_3dhp_test_set', 'TS%d' % seq, 'imageSequence')
    frames = sorted(os.listdir(seq_dir))
    unannotated = {3: 1, 4: 2}.get(seq, 0)
    return frames[:-unannotated] if unannotated else frames
def num_test_frames(seq):
    """
    Number of annotated frames in an MPI-INF-3DHP test sequence.

    The last frame of TS3 and the last two frames of TS4 have no annotations
    and are not counted.
    """
    seq_dir = os.path.join(MPII_3DHP_PATH, 'mpi_inf_3dhp_test_set', 'TS%d' % seq, 'imageSequence')
    return len(os.listdir(seq_dir)) - {3: 1, 4: 2}.get(seq, 0)
def get_test_image(seq, frame_ind, rgb=True):
    """ frame_ind is indexed from 0, while filename is indexed from 1!!"""
    img_path = os.path.join(MPII_3DHP_PATH, 'mpi_inf_3dhp_test_set',
                            'TS%d' % seq, 'imageSequence', 'img_%06d.jpg' % (frame_ind + 1))
    img = cv2.imread(img_path)
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if rgb else img
def get_image(subject, sequence, camera, frame, rgb=True):
    """
    Loads one frame of the MPI-INF-3DHP training set.

    :param frame: zero-based frame index; file names on disk are one-based
    :param rgb: if True, OpenCV's BGR image is converted to RGB before returning
    """
    img = cv2.imread(os.path.join(MPII_3DHP_PATH, 'frames', 'S%d' % subject, 'Seq%d' % sequence, 'imageSequence',
                                  "img_%d_%06d.jpg" % (camera, frame + 1)))
    if rgb:
        # Was cv2.COLOR_RGB2BGR; use COLOR_BGR2RGB for consistency with
        # get_test_image/get_mask siblings. The two constants are aliases in
        # OpenCV (both simply reverse the channel order), so behavior is unchanged.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
def get_mask(subject, sequence, camera, frame, mask_type):
    """
    Loads a mask image for one training frame.

    :param mask_type: either 'ChairMasks' or 'FGmasks'
    """
    assert mask_type in ['ChairMasks', 'FGmasks'], "unknown mask type: " + mask_type
    mask_path = os.path.join(MPII_3DHP_PATH, 'frames', 'S%d' % subject, 'Seq%d' % sequence,
                             mask_type, "img_%d_%06d.jpg" % (camera, frame + 1))
    return cv2.imread(mask_path)
def get_train_fps(sub, seq):
    """ Frame rate of a training recording: subjects 3 and 5 and S1/Seq2 were captured at 50 fps, the rest at 25 fps. """
    assert 1 <= sub <= 8
    assert seq in (1, 2)
    recorded_at_50fps = sub in (3, 5) or (sub, seq) == (1, 2)
    return 50 if recorded_at_50fps else 25
def test_poses_hrnet(seq):
    """ HR-Net 2D keypoint predictions for an MPI-INF-3DHP test sequence. """
    pred_path = os.path.join(MPII_3DHP_PATH, 'mpi_inf_3dhp_test_set', 'TS%d' % seq, 'hrnet.pkl')
    return load(pred_path)
def train_poses_hrnet(sub, seq, cam):
    """ HR-Net 2D keypoint predictions for one training subject/sequence/camera. """
    pred_path = os.path.join(MPII_3DHP_PATH, 'S%d' % sub, 'Seq%d' % seq,
                             'hrnet', 'hrnet_%02d.pkl' % cam)
    return load(pred_path)
# filters out the relevant 17 joints from the raw annot.mat files. Based on mpii_get_joint_set.m
# The listed indices are 1-based (Matlab convention), hence the trailing -1.
MUPOTS_RELEVANT_JOINTS = np.array([8, 6, 15, 16, 17, 10, 11, 12, 24, 25, 26, 19, 20, 21, 5, 4, 7]) - 1
def train_ground_truth(sub, seq, fix_incorrect=True):
    """
    Returns the ground truth annotations. Returns a dict with fields 'annot2', 'annot3', 'univ_annot3'

    :param sub: subject index (1-8)
    :param seq: sequence index (1 or 2)
    :param fix_incorrect: S4/Seq2 has annotations flipped on some frames, if True they are flipped back
    :return: dict of per-camera lists of float32 arrays, 17 joints each
    """
    annot = load(os.path.join(MPII_3DHP_PATH, 'S%d' % sub, 'Seq%d' % seq, 'annot.mat'))
    # Raw annotations contain 28 joints; keep only the 17 relevant ones.
    annot2 = list([x[0].reshape((-1, 28, 2))[:, MUPOTS_RELEVANT_JOINTS].astype('float32') for x in annot['annot2']])
    annot3 = list([x[0].reshape((-1, 28, 3))[:, MUPOTS_RELEVANT_JOINTS].astype('float32') for x in annot['annot3']])
    univ_annot3 = list([x[0].reshape((-1, 28, 3))[:, MUPOTS_RELEVANT_JOINTS].astype('float32') for x in annot['univ_annot3']])

    assert np.all(annot['cameras'][0] == np.arange(14))
    assert np.all(annot['frames'][:, 0] == np.arange(len(annot2[0])))

    # S3/Seq1 has one extra annotation but one less frame
    # Remove the very last annotation from everywhere
    if sub == 3 and seq == 1:
        for cam in range(14):
            annot2[cam] = annot2[cam][:-1]
            annot3[cam] = annot3[cam][:-1]
            univ_annot3[cam] = univ_annot3[cam][:-1]

    if sub == 4 and seq == 2 and fix_incorrect:
        # between 3759(in) and 5853(ex) annotations are flipped
        for cam in range(14):
            annot2[cam][3759:5853] = MuPoTSJoints().flip(annot2[cam][3759:5853])
            annot3[cam][3759:5853] = MuPoTSJoints().flip(annot3[cam][3759:5853])
            univ_annot3[cam][3759:5853] = MuPoTSJoints().flip(univ_annot3[cam][3759:5853])

    # Sanity check: all cameras must have the same number of frames.
    N = len(annot2[0])
    for cam in range(14):
        assert len(annot2[cam]) == N
        assert len(annot3[cam]) == N
        assert len(univ_annot3[cam]) == N

    result = {'annot2': annot2, 'annot3': annot3, 'univ_annot3': univ_annot3}

    return result
def get_test_calib(seq):
    """
    Intrinsic camera matrix of an MPI-INF-3DHP test sequence as a 3x3 float32 array.
    TS1-TS4 share one camera (2048px frames), TS5-TS6 another (1920x1080 frames).
    """
    assert 1 <= seq <= 6, seq
    # Numbers are coming from the "mpi_inf_3dhp_test_set/test_util/camera_calibration/*.calib" files
    if seq <= 4:
        f = 7.32506 / 10 * 2048
        cx = 1024 - 0.0322884 / 10 * 2048
        cy = 1024 + 0.0929296 / 10 * 2048
    else:
        f = 8.770747185 / 10 * 1920
        cx = 1920 / 2 - 0.104908645 / 10 * 1920
        cy = 1080 / 2 + 0.104899704 / 5.625000000 * 1080
    return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]], dtype='float32')
def get_calibration_matrices():
    """
    Parses the per-sequence camera.calibration files of the training set.

    Returns:
         dict (subject, seq, camera) to intrinsic camera matrix
    """
    calibs = {}
    for subject in range(1, 9):
        for seq in [1, 2]:
            with open(MPII_3DHP_PATH + '/S%d/Seq%d/camera.calibration' % (subject, seq)) as f:
                data = f.readlines()
            data = [x.strip() for x in data]

            # The file lists a "name <id>" line followed by that camera's
            # parameter lines; remember the current camera id while scanning.
            camera = None
            for line in data:
                if line.startswith("name"):
                    camera = int(line[5:])
                elif line.startswith("intrinsic"):
                    assert camera is not None
                    # print line
                    line = line[len("intrinsic"):].strip()
                    parts = line.split(' ')
                    parts = list(map(float, parts))

                    # The intrinsic line stores a flattened 4x4 matrix; only
                    # fx (0), cx (2), fy (5) and cy (6) are used here.
                    assert len(parts) == 16
                    c = np.eye(3, dtype='float32')
                    c[0, 0] = parts[0]
                    c[0, 2] = parts[2]
                    c[1, 1] = parts[5]
                    c[1, 2] = parts[6]

                    calibs[(subject, seq, int(camera))] = c

    return calibs
| 6,001 | 34.099415 | 136 | py |
pose_refinement | pose_refinement-master/src/databases/__init__.py | 0 | 0 | 0 | py | |
pose_refinement | pose_refinement-master/src/model/pose_refinement.py | import numpy as np
import torch
from scipy import ndimage
from databases.joint_sets import MuPoTSJoints
from training.callbacks import BaseMPJPECalculator
from training.torch_tools import get_optimizer
from util.misc import assert_shape
from util.pose import remove_root, insert_zero_joint
def pose_error(pred, init):
    """ Sum of squared differences between the refined and the initial poses. """
    residual = pred - init
    return torch.sum(residual ** 2)
def euc_err(pred, gt):
    """ Calculates the euclidean distance between each joint (not squared). """
    diff = pred - gt
    if isinstance(pred, np.ndarray):
        return np.linalg.norm(diff, axis=-1)
    else:
        return torch.norm(diff, dim=-1)
def zero_velocity_loss(pred, step=1):
    """ Penalizes any movement between poses that are ``step`` frames apart. """
    velocity = pred[step:] - pred[:-step]
    return torch.sum(velocity ** 2)
def step_zero_velocity_loss(pred, step=1):
    """ Like zero_velocity_loss but keeps one value per frame pair (sums over the last two dims only). """
    velocity = pred[step:] - pred[:-step]
    return torch.sum(velocity ** 2, dim=(1, 2))
def const_velocity_loss(pred, step=1):
    """ Penalizes acceleration: the change of velocity over ``step``-frame gaps. """
    velocity = pred[step:] - pred[:-step]
    acceleration = velocity[step:] - velocity[:-step]
    return torch.sum(acceleration ** 2)
def step_const_velocity_loss(pred, step):
    """ Like const_velocity_loss but keeps one value per frame triple (sums over the last two dims only). """
    velocity = pred[step:] - pred[:-step]
    acceleration = velocity[step:] - velocity[:-step]
    return torch.sum(acceleration ** 2, dim=(1, 2))
def gmloss(err, a):
    """ Geman-McClure robust cost: err^2 / (err^2 + a). """
    sq = err * err
    return sq / (sq + a)
def capped_l2(err, a):
    """ calculates min(err*2, a) """
    if isinstance(err, np.ndarray):
        return np.minimum(err * err, a)
    # Torch branch: where() instead of minimum keeps gradients flowing
    # through whichever operand is selected.
    squared = err * err
    return torch.where(squared < a, squared, a)
def capped_l2_euc_err(pred, gt, a):
    """
    Capped squared Euclidean error between poses: min(||pred - gt||^2, a),
    computed over the last axis. Works on numpy arrays and torch tensors alike.
    """
    if isinstance(pred, np.ndarray):
        err = np.sum((pred - gt) ** 2, axis=-1)
        # Bug fix: previously this returned np.minimum(err * err, a), squaring the
        # already-squared error and disagreeing with the torch branch below.
        return np.minimum(err, a)
    else:
        diff = pred - gt
        err = torch.sum(diff * diff, dim=-1)
        return torch.where(err < a, err, a)
def abs_to_hiprel(poses, joint_set):
    """ Converts an absolute pose into [hip]+relative_pose. """
    assert_shape(poses, (None, joint_set.NUM_JOINTS, 3))
    hip_ind = joint_set.index_of('hip')
    # First joint is the absolute hip, the remaining ones are hip-relative.
    root = poses[:, [hip_ind]].copy()
    relative = remove_root(poses, hip_ind)
    return np.concatenate([root, relative], axis=-2)
def add_back_hip(poses, joint_set):
    """ Inverse of abs_to_hiprel """
    assert_shape(poses, (None, joint_set.NUM_JOINTS, 3))
    # Slot 0 carries the absolute hip position.
    root = poses[:, [0]].copy()
    # Re-insert a zero hip joint at its proper index, then translate everything by the root.
    result = insert_zero_joint(poses[:, 1:], joint_set.index_of('hip'))
    return result + root
class StackedArrayAllMupotsEvaluator(BaseMPJPECalculator):
    """
    An evaluator that expects a stacked numpy array as prediction results.
    Uses all poses, no masking out invisible poses.
    """
    def __init__(self, pred, dataset, ignore_invalid, post_process3d=None, prefix='test'):
        # pred: stacked predictions aligned with dataset rows
        # ignore_invalid: if True, rows failing dataset.valid_annotations are dropped
        self.prediction = pred
        self.dataset = dataset
        self.ignore_invalid = ignore_invalid
        # Ground truth grouped per sequence; MuPoTS has 20 sequences (1..20)
        data_3d_mm = {}
        for seq in range(1, 21):
            inds = self.dataset.index.seq_num == seq
            if self.ignore_invalid:
                inds = inds & self.dataset.valid_annotations
            data_3d_mm[seq] = dataset.poses3d[inds]
        super().__init__(data_3d_mm, dataset.pose3d_jointset, post_process3d=post_process3d, csv=None, prefix=prefix)
    def pred_and_calc_loss(self, model):
        """Return (losses, preds) per sequence; losses are all-zero placeholders
        since the predictions were computed beforehand."""
        assert model is None, "StackedArrayAllMupotsEvaluator does not handle model evaluation"
        preds = {}
        losses = {}
        for seq in range(1, 21):
            # same row selection as in __init__ so preds align with data_3d_mm
            inds = self.dataset.index.seq_num == seq
            if self.ignore_invalid:
                inds = inds & self.dataset.valid_annotations
            preds[seq] = self.prediction[inds]
            losses[seq] = np.zeros_like(preds[seq])
        return losses, preds
def optimize_poses(pred3d, data, _config, **kwargs):
    """
    Runs the temporal-smoothing optimisation over every sequence of the dataset.
    Parameters:
        pred3d: poses predicted by VideoPose, aligned with `data`
        data: dataset describing the sequences and 2D poses matching `pred3d`
        _config: dictionary of additional parameters; `kwargs` override its entries
    Returns:
        smoothed poses, same shape/units (mm) as `pred3d`
    """
    # copy so the caller's config dict is not mutated by the kwargs overrides
    _config = dict(_config)
    _config.update(kwargs)
    joint_set = MuPoTSJoints()
    seqs = np.unique(data.index.seq)
    smoothed_pred = np.zeros_like(pred3d)
    losses = []
    for seq in seqs:
        inds = data.index.seq == seq
        # hip-relative representation, converted from mm to meters
        poses_init = abs_to_hiprel(pred3d[inds].copy(), joint_set).astype('float32') / 1000
        # interpolate invisible poses, if required
        poses_pred = poses_init.copy()
        # per-frame confidence: mean 2D keypoint score of the frame
        kp_score = np.mean(data.poses2d[inds, :, 2], axis=-1)
        if _config['smooth_visibility']:
            kp_score = ndimage.median_filter(kp_score, 9)
        kp_score = torch.from_numpy(kp_score).cuda()
        poses_init = torch.from_numpy(poses_init).cuda()
        poses_pred = torch.from_numpy(poses_pred).cuda()
        # NOTE(review): `scale` is created but never used in the loss below — verify
        scale = torch.ones((len(kp_score), 1, 1))
        # only the smoothed poses are optimised; everything else is fixed
        poses_init.requires_grad = False
        poses_pred.requires_grad = True
        kp_score.requires_grad = False
        scale.requires_grad = False
        optimizer = get_optimizer([poses_pred], _config)
        for i in range(_config['num_iter']):
            # smoothing formulation
            # data term: keeps the result close to the network prediction,
            # weighted by 2D keypoint confidence, made robust by the selected cost
            if _config['pose_loss'] == 'gm':
                pose_loss = torch.sum(kp_score.view(-1, 1, 1) * gmloss(poses_pred - poses_init, _config['gm_alpha']))
            elif _config['pose_loss'] == 'capped_l2':
                pose_loss = torch.sum(kp_score.view(-1, 1, 1) * capped_l2(poses_pred - poses_init,
                                                                          torch.tensor(_config['l2_cap']).float().cuda()))
            elif _config['pose_loss'] == 'capped_l2_euc_err':
                pose_loss = torch.sum(kp_score.view(-1, 1) * capped_l2_euc_err(poses_pred, poses_init,
                                                                               torch.tensor(_config['l2_cap']).float().cuda()))
            else:
                raise NotImplementedError('Unknown pose_loss' + _config['pose_loss'])
            # smoothness terms, applied separately to the hip trajectory (joint 0)
            # and the hip-relative joints; the config names select one of the
            # module-level *velocity_loss functions via globals()
            velocity_loss_hip = torch.sum(globals()[_config['smoothness_loss_hip']](poses_pred[:, [0], :], 1))
            step = _config['smoothness_loss_hip_largestep']
            vel_loss = globals()[_config['smoothness_loss_hip']](poses_pred[:, [0], :], step)
            # large-step terms are downweighted on high-confidence frames
            velocity_loss_hip_large = torch.sum((1 - kp_score[-len(vel_loss):]) * vel_loss)
            velocity_loss_rel = torch.sum(globals()[_config['smoothness_loss_rel']](poses_pred[:, 1:, :], 1))
            vel_loss = globals()[_config['smoothness_loss_rel']](poses_pred[:, 1:, :], step)
            velocity_loss_rel_large = torch.sum((1 - kp_score[-len(vel_loss):]) * vel_loss)
            total_loss = pose_loss + _config['smoothness_weight_hip'] * velocity_loss_hip \
                         + _config['smoothness_weight_hip_large'] * velocity_loss_hip_large \
                         + _config['smoothness_weight_rel'] * velocity_loss_rel \
                         + _config['smoothness_weight_rel_large'] * velocity_loss_rel_large
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
        # back to millimetres and absolute coordinates
        poses_init = poses_init.detach().cpu().numpy() * 1000
        poses_pred = poses_pred.detach().cpu().numpy() * 1000
        poses_init = add_back_hip(poses_init, joint_set)
        poses_pred = add_back_hip(poses_pred, joint_set)
        smoothed_pred[inds] = poses_pred
        # NOTE(review): raises NameError if num_iter == 0 — total_loss is only
        # defined inside the optimisation loop
        losses.append(total_loss.item())
    if _config.get('print_loss', False):
        print('Avg loss:', np.mean(losses))
    return smoothed_pred
| 7,511 | 34.267606 | 127 | py |
pose_refinement | pose_refinement-master/src/model/__init__.py | 0 | 0 | 0 | py | |
pose_refinement | pose_refinement-master/src/model/videopose.py | # Based on https://github.com/facebookresearch/VideoPose3D
#
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
class TemporalModelBase(nn.Module):
    """
    Shared base of the temporal-convolution pose models below.
    Do not instantiate this class.
    """
    def __init__(self, in_features, num_joints_out,
                 filter_widths, causal, dropout, channels, layernorm):
        super().__init__()
        # Validate input
        for fw in filter_widths:
            assert fw % 2 != 0, 'Only odd filter widths are supported'
        self.in_features = in_features
        self.num_joints_out = num_joints_out
        self.filter_widths = filter_widths
        self.layernorm = layernorm
        self.channels = channels
        self.drop = nn.Dropout(dropout)
        self.relu = nn.ReLU(inplace=True)
        self.pad = [filter_widths[0] // 2] # list of padding sizes
        # final 1x1 conv projecting channels down to the flattened 3D joints
        self.shrink = nn.Conv1d(channels, num_joints_out * 3, 1)
    def set_bn_momentum(self, momentum):
        """Set the momentum of every norm layer. Assumes the subclass has
        already created ``expand_bn`` and ``layers_bn``."""
        # if not self.layernorm:
        self.expand_bn.momentum = momentum
        for bn in self.layers_bn:
            bn.momentum = momentum
    def receptive_field(self):
        """
        Return the total receptive field of this model as # of frames.
        """
        frames = 0
        for f in self.pad:
            frames += f
        return 1 + 2 * frames
    def create_norm_layer(self, frame_num):
        """ frame_num is the spatial dimension (currently unused by both branches) """
        if self.layernorm:
            # return nn.LayerNorm([self.channels, frame_num], elementwise_affine=False)
            return nn.InstanceNorm1d(self.channels, momentum=0.1, affine=True)
        else:
            return nn.BatchNorm1d(self.channels, momentum=0.1)
    def total_causal_shift(self):
        """
        Return the asymmetric offset for sequence padding.
        The returned value is typically 0 if causal convolutions are disabled,
        otherwise it is half the receptive field.
        """
        # causal_shift is populated by the subclasses' constructors
        frames = self.causal_shift[0]
        next_dilation = self.filter_widths[0]
        for i in range(1, len(self.filter_widths)):
            frames += self.causal_shift[i] * next_dilation
            next_dilation *= self.filter_widths[i]
        return frames
    def forward(self, x):
        # x: (nBatch, nFrames, in_features); _forward_blocks is subclass-defined
        assert len(x.shape) == 3, x.shape
        assert x.shape[-1] == self.in_features
        # sz = x.shape
        # x = x.view(x.shape[0], x.shape[1], -1) # (nBatch,nFrames,nJoints*2) - unroll a single pose
        x = x.permute(0, 2, 1) # (nBatch, nFeatures, nFrames)
        x = self._forward_blocks(x)
        x = x.permute(0, 2, 1) # (nBatch, nFrames, nFeatures)
        # x = x.view(sz[0], -1, self.num_joints_out, 3)
        return x
class TemporalModel(TemporalModelBase):
    """
    Reference 3D pose estimation model with temporal convolutions.
    This implementation can be used for all use-cases.
    """
    def __init__(self, in_features, num_joints_out,
                 filter_widths, causal=False, dropout=0.25, channels=1024, dense=False, layernorm=False):
        """
        Initialize this model.
        Arguments:
        in_features -- number of input features for each joint (typically 2 for 2D input)
        num_joints_out -- number of output joints (can be different than input)
        filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
        causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
        dropout -- dropout probability
        channels -- number of convolution channels
        dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
        layernorm -- use InstanceNorm1d instead of BatchNorm1d (see create_norm_layer)
        """
        super().__init__(in_features, num_joints_out, filter_widths, causal, dropout, channels, layernorm)
        self.expand_conv = nn.Conv1d(in_features, channels, filter_widths[0], bias=False)
        conv_num_frames = 1 # spatial dimension of the output of the conv layer; works only for [3,3,3,...shaped]
        for f in filter_widths:
            conv_num_frames *= f
        conv_num_frames = conv_num_frames - (filter_widths[0]-1)
        self.expand_bn = self.create_norm_layer(conv_num_frames)
        layers_conv = []
        layers_bn = []
        self.causal_shift = [(filter_widths[0]) // 2 if causal else 0] # nonzero only for causal model
        next_dilation = filter_widths[0]
        for i in range(1, len(filter_widths)):
            self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
            self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
            # spatial size shrinks by the dilated kernel extent at each block
            conv_num_frames = conv_num_frames - (filter_widths[i] - 1) * next_dilation
            layers_conv.append(nn.Conv1d(channels, channels,
                                         filter_widths[i] if not dense else (2 * self.pad[-1] + 1),
                                         dilation=next_dilation if not dense else 1,
                                         bias=False))
            layers_bn.append(self.create_norm_layer(conv_num_frames))
            # second conv of the residual block is pointwise (kernel size 1)
            layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
            layers_bn.append(self.create_norm_layer(conv_num_frames))
            next_dilation *= filter_widths[i]
        self.layers_conv = nn.ModuleList(layers_conv)
        self.layers_bn = nn.ModuleList(layers_bn)
    def _forward_blocks(self, x):
        """Dilated residual blocks; x is (nBatch, in_features, nFrames)."""
        x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
        for i in range(len(self.pad) - 1):
            pad = self.pad[i + 1]
            shift = self.causal_shift[i + 1]
            # residual: crop of the input matching this block's output length
            res = x[:, :, pad + shift: x.shape[2] - pad + shift]
            x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
            x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
        x = self.shrink(x)
        return x
class TemporalModelOptimized1f(TemporalModelBase):
    """
    3D pose estimation model optimized for single-frame batching, i.e.
    where batches have input length = receptive field, and output length = 1.
    This scenario is only used for training when stride == 1.
    This implementation replaces dilated convolutions with strided convolutions
    to avoid generating unused intermediate results. The weights are interchangeable
    with the reference implementation.
    """
    def __init__(self, in_features, num_joints_out,
                 filter_widths, causal=False, dropout=0.25, channels=1024, layernorm=False):
        """
        Initialize this model.
        Arguments:
        in_features -- number of input features for each joint (typically 2 for 2D input)
        num_joints_out -- number of output joints (can be different than input)
        filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
        causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
        dropout -- dropout probability
        channels -- number of convolution channels
        layernorm -- use InstanceNorm1d instead of BatchNorm1d (see create_norm_layer)
        """
        super().__init__(in_features, num_joints_out, filter_widths, causal, dropout, channels, layernorm)
        # stride == kernel width: each output frame consumes a disjoint window
        self.expand_conv = nn.Conv1d(in_features, channels, filter_widths[0], stride=filter_widths[0], bias=False)
        conv_num_frames = 1 # spatial dimesnsion of the output of the conv layer; works only for [3,3,3,...shaped]
        for f in filter_widths[1:]:
            conv_num_frames *= f
        self.expand_bn = self.create_norm_layer(conv_num_frames)
        layers_conv = []
        layers_bn = []
        self.causal_shift = [(filter_widths[0] // 2) if causal else 0]
        next_dilation = filter_widths[0]
        for i in range(1, len(filter_widths)):
            self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
            self.causal_shift.append((filter_widths[i] // 2) if causal else 0)
            # strided conv divides the spatial size instead of dilating
            conv_num_frames = conv_num_frames // filter_widths[i]
            layers_conv.append(nn.Conv1d(channels, channels, filter_widths[i], stride=filter_widths[i], bias=False))
            layers_bn.append(self.create_norm_layer(conv_num_frames))
            # second conv of the residual block is pointwise (kernel size 1)
            layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
            layers_bn.append(self.create_norm_layer(conv_num_frames))
            next_dilation *= filter_widths[i]
        self.layers_conv = nn.ModuleList(layers_conv)
        self.layers_bn = nn.ModuleList(layers_bn)
    def _forward_blocks(self, x):
        """Strided residual blocks; x is (nBatch, in_features, nFrames)."""
        x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
        for i in range(len(self.pad) - 1):
            # residual: subsample the input at the block's stride to match its output
            res = x[:, :, self.causal_shift[i + 1] + self.filter_widths[i + 1] // 2:: self.filter_widths[i + 1]]
            x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
            x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
        x = self.shrink(x)
        return x
| 9,265 | 40.927602 | 116 | py |
UltraNest | UltraNest-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except:
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
extra_include_dirs = ['.']
try:
import numpy
extra_include_dirs += [numpy.get_include()]
except:
pass
ext_args = dict(
include_dirs=extra_include_dirs,
extra_compile_args=['-O3'],
extra_link_args=['-O3'],
)
with open('README.rst', encoding="utf-8") as readme_file:
readme = readme_file.read()
with open('HISTORY.rst', encoding="utf-8") as history_file:
history = history_file.read()
requirements = ['numpy', 'cython', 'matplotlib', 'corner']
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="Johannes Buchner",
author_email='johannes.buchner.acad@gmx.com',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Fit and compare complex models reliably and rapidly. Advanced Nested Sampling.",
install_requires=requirements,
ext_modules = cythonize([
Extension('ultranest.mlfriends', ["ultranest/mlfriends.pyx"],
**ext_args),
Extension('ultranest.stepfuncs', ["ultranest/stepfuncs.pyx"],
**ext_args),
]),
license="GNU General Public License v3",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='ultranest',
name='ultranest',
packages=['ultranest'],
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/JohannesBuchner/ultranest',
version='3.6.1',
zip_safe=False,
cmdclass={'build_ext': build_ext},
)
| 2,347 | 29.102564 | 97 | py |
UltraNest | UltraNest-master/languages/c++/runcppsimple.py | import numpy as np
import ctypes
from ultranest import ReactiveNestedSampler
# this version uses one parameter vector per function call
# because function calls are expensive, the runcpp.py way is more efficient and recommended
mycpplib = ctypes.CDLL("mycpplib.so")
# define the arguments of the functions and return values
mycpplib.my_cpp_transform.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
ctypes.c_size_t]
mycpplib.my_cpp_likelihood.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
ctypes.c_size_t]
mycpplib.my_cpp_likelihood.restype = ctypes.c_double
def mytransformwrapper(cube):
params = cube.copy()
mycpplib.my_cpp_transform(params, params.size)
return params
def mylikelihoodwrapper(params):
return mycpplib.my_cpp_likelihood(params, params.size)
paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames, mylikelihoodwrapper, transform=mytransformwrapper)
sampler.run()
sampler.print_results()
sampler.plot()
| 1,039 | 29.588235 | 94 | py |
UltraNest | UltraNest-master/languages/c++/runcpp.py | import numpy as np
import ctypes
from ultranest import ReactiveNestedSampler
mycpplib = ctypes.CDLL("mycpplib.so")
# define the arguments of the functions and return values
mycpplib.my_cpp_transform_vectorized.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_size_t,
ctypes.c_size_t]
mycpplib.my_cpp_likelihood_vectorized.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_size_t,
ctypes.c_size_t,
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS')]
def mytransformwrapper(cube):
params = cube.copy()
mycpplib.my_cpp_transform_vectorized(params, params.shape[1], params.shape[0])
return params
def mylikelihoodwrapper(params):
l = np.zeros(len(params))
mycpplib.my_cpp_likelihood_vectorized(params, params.shape[1], params.shape[0], l)
return l
paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames, mylikelihoodwrapper, transform=mytransformwrapper, vectorized=True)
sampler.run()
sampler.print_results()
sampler.plot()
| 1,108 | 30.685714 | 111 | py |
UltraNest | UltraNest-master/languages/python/runpy.py | import numpy as np
from ultranest import ReactiveNestedSampler
def mytransform(cube):
    """Map unit-cube samples to the [-1, 1] parameter box."""
    return 2 * cube - 1
def mylikelihood(params):
    """Vectorized Gaussian log-likelihood (sigma = 0.01) with per-dimension
    centers at 0.0, 0.1, 0.2, ... for each column of `params`."""
    ndim = params.shape[1]
    centers = (0.1 * np.arange(ndim))[None, :]
    z = (params - centers) / 0.01
    return -0.5 * np.sum(z ** 2, axis=1)
paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames, mylikelihood, transform=mytransform, vectorized=True)
sampler.run()
sampler.print_results()
sampler.plot()
| 446 | 26.9375 | 97 | py |
UltraNest | UltraNest-master/languages/c/runcsimple.py | import numpy as np
import ctypes
from ultranest import ReactiveNestedSampler
# this version uses one parameter vector per function call
# because function calls are expensive, the runc.py way is more efficient and recommended
myclib = ctypes.CDLL("mylib.so")
# define the arguments of the functions and return values
myclib.my_c_transform.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
ctypes.c_size_t]
myclib.my_c_likelihood.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
ctypes.c_size_t]
myclib.my_c_likelihood.restype = ctypes.c_double
def mytransformwrapper(cube):
params = cube.copy()
myclib.my_c_transform(params, params.size)
return params
def mylikelihoodwrapper(params):
return myclib.my_c_likelihood(params, params.size)
paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames, mylikelihoodwrapper, transform=mytransformwrapper)
sampler.run()
sampler.print_results()
sampler.plot()
| 1,013 | 29.727273 | 94 | py |
UltraNest | UltraNest-master/languages/c/runc.py | import numpy as np
import ctypes
from ultranest import ReactiveNestedSampler
myclib = ctypes.CDLL("mylib.so")
# define the arguments of the functions and return values
myclib.my_c_transform_vectorized.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_size_t,
ctypes.c_size_t]
myclib.my_c_likelihood_vectorized.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_size_t,
ctypes.c_size_t,
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS')]
def mytransformwrapper(cube):
params = cube.copy()
myclib.my_c_transform_vectorized(params, params.shape[1], params.shape[0])
return params
def mylikelihoodwrapper(params):
l = np.zeros(len(params))
myclib.my_c_likelihood_vectorized(params, params.shape[1], params.shape[0], l)
return l
paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames, mylikelihoodwrapper, transform=mytransformwrapper, vectorized=True)
sampler.run()
sampler.print_results()
sampler.plot()
| 1,089 | 30.142857 | 111 | py |
UltraNest | UltraNest-master/languages/fortran/runfort.py | import numpy as np
import ctypes
from ultranest import ReactiveNestedSampler
myfortlib = ctypes.CDLL("myfortlib.so")
# define the arguments of the functions and return values
myfortlib.my_fort_transform.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
ctypes.POINTER(ctypes.c_size_t)]
myfortlib.my_fort_transform.restype = None
myfortlib.my_fort_likelihood.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(ctypes.c_double)]
myfortlib.my_fort_likelihood.restype = ctypes.c_double
def mytransformwrapper(cube):
params = cube.copy()
ndim = ctypes.c_size_t(cube.size)
myfortlib.my_fort_transform(params, ctypes.pointer(ndim))
return params
print(mytransformwrapper(np.array([0.5, 0.5])))
def mylikelihoodwrapper(params):
ndim = ctypes.c_size_t(params.size)
l = ctypes.c_double(0.0)
myfortlib.my_fort_likelihood(params, ctypes.pointer(ndim), ctypes.pointer(l))
return l.value
print(mylikelihoodwrapper(np.array([0.1, 0.2])))
paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames, mylikelihoodwrapper, transform=mytransformwrapper)
sampler.run()
sampler.print_results()
sampler.plot()
| 1,249 | 31.051282 | 94 | py |
UltraNest | UltraNest-master/examples/testfunnel.py | import argparse
import numpy as np
from numpy import log
def main(args):
np.random.seed(2)
ndim = args.x_dim
sigma = args.sigma
centers = np.sin(np.arange(ndim) / 2.)
data = np.random.normal(centers, sigma).reshape((1, -1))
def loglike(theta):
sigma = 10**theta[:,0]
like = -0.5 * (((theta[:,1:] - data)/sigma.reshape((-1, 1)))**2).sum(axis=1) - 0.5 * log(2 * np.pi * sigma**2) * ndim
return like
def transform(x):
z = x * 20 - 10
z[:,0] = x[:,0] * 6 - 3
return z
import string
paramnames = ['sigma'] + list(string.ascii_lowercase)[:ndim]
if args.reactive:
from ultranest import ReactiveNestedSampler
sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
log_dir=args.log_dir + 'RNS-%dd' % ndim, vectorized=True,
resume=True)
sampler.run(log_interval=20, min_num_live_points=args.num_live_points)
sampler.plot()
else:
from ultranest import NestedSampler
sampler = NestedSampler(paramnames, loglike, transform=transform,
num_live_points=args.num_live_points, vectorized=True,
log_dir=args.log_dir + '-%dd' % ndim, resume=True)
sampler.run()
sampler.plot()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=2,
help="Dimensionality")
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--sigma', type=float, default=0.5)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--log_dir', type=str, default='logs/funnel')
parser.add_argument('--reactive', action='store_true')
args = parser.parse_args()
main(args)
| 1,890 | 33.381818 | 125 | py |
UltraNest | UltraNest-master/examples/rundirichlet.py | #!/usr/bin/env python3
"""
This script tests the UltraNest stepsamplers
in a few configurations with a real model.
"""
import numpy as np
import ultranest, ultranest.stepsampler
# velocity dispersions of dwarf galaxies by van Dokkum et al., Nature, 555, 629 https://arxiv.org/abs/1803.10237v1
values = np.array([15, 4, 2, 11, 1, -2, -1, -14, -39, -3])
values_lo = np.array([7, 16, 6, 3, 6, 5, 10, 6, 11, 13])
values_hi = np.array([7, 15, 8, 3, 6, 6, 10, 7, 14, 14])
def run():
n_data = len(values)
samples = []
for i in range(n_data):
# draw normal random points
u = np.random.normal(size=400)
v = values[i] + np.where(u < 0, u * values_lo[i], u * values_hi[i])
samples.append(v)
data = np.array(samples)
Nobj, Nsamples = data.shape
minval = -80
maxval = +80
ndim = 8
viz_callback = None
bins = np.linspace(minval, maxval, ndim+1)
binned_data = np.array([np.histogram(row, bins=bins)[0] for row in data])
param_names = ['bin%d' % (i+1) for i in range(ndim)]
def likelihood(params):
"""Histogram model"""
return np.log(np.dot(binned_data, params) / Nsamples + 1e-300).sum()
def transform_dirichlet(quantiles):
"""Histogram distribution priors"""
# https://en.wikipedia.org/wiki/Dirichlet_distribution#Random_number_generation
# first inverse transform sample from Gamma(alpha=1,beta=1), which is Exponential(1)
gamma_quantiles = -np.log(quantiles)
# dirichlet variables
return gamma_quantiles / gamma_quantiles.sum()
stepsamplers = [
ultranest.stepsampler.RegionBallSliceSampler(40, region_filter=False, adaptive_nsteps='move-distance'),
ultranest.stepsampler.RegionSliceSampler(40, region_filter=False),
ultranest.stepsampler.CubeSliceSampler(40, region_filter=True),
ultranest.stepsampler.RegionMHSampler(40, region_filter=False),
ultranest.stepsampler.RegionSequentialSliceSampler(40, region_filter=True, adaptive_nsteps='move-distance'),
ultranest.stepsampler.SpeedVariableRegionSliceSampler(
step_matrix=[[1], [1,2,3], Ellipsis, np.ones(len(param_names), dtype=bool)], nsteps=40, region_filter=False),
]
for stepsampler in stepsamplers:
print(stepsampler)
sampler = ultranest.ReactiveNestedSampler(
param_names, likelihood, transform_dirichlet)
sampler.stepsampler = stepsampler
sampler.run(frac_remain=0.5, viz_callback=viz_callback)
sampler.print_results()
if __name__ == '__main__':
run()
| 2,378 | 33.478261 | 114 | py |
UltraNest | UltraNest-master/examples/testslantedeggbox.py | import os
import sys
import argparse
import numpy as np
from numpy import cos, pi
def main(args):
def loglike(z):
chi = (2. + (cos(z[:,:2] / 2.)).prod(axis=1))**5
chi2 = -np.abs((z - 5 * pi) / 0.5).sum(axis=1)
return chi + chi2
def transform(x):
return x * 100
import string
paramnames = list(string.ascii_lowercase)[:args.x_dim]
if args.reactive:
from ultranest import ReactiveNestedSampler
sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
log_dir=args.log_dir + 'RNS-%dd' % args.x_dim, resume=True,
vectorized=True)
#log_dir=None)
sampler.run(log_interval=20, min_num_live_points=args.num_live_points)
sampler.plot()
else:
from ultranest import NestedSampler
sampler = NestedSampler(paramnames, loglike, transform=transform,
num_live_points=args.num_live_points, vectorized=True,
log_dir=args.log_dir + '%dd' % args.x_dim, resume=True)
#log_dir=None)
sampler.run(log_interval=20)
sampler.plot()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=2,
help="Dimensionality")
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument("--test_samples", type=int, default=0)
parser.add_argument('--log_dir', type=str, default='logs/slantedeggbox')
parser.add_argument('--reactive', action='store_true')
args = parser.parse_args()
main(args)
| 1,676 | 31.25 | 82 | py |
UltraNest | UltraNest-master/examples/test.py | import os
import sys
import argparse
import numpy as np
def main(args):
from ultranest import NestedSampler
#def loglike(z):
# return np.array([-sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0) for x in z])
def loglike_(z):
return np.array([-sum(100.0 * (x[1::2] - x[::2] ** 2.0) ** 2.0 + (1 - x[::2]) ** 2.0) for x in z])
def loglike(z):
a = np.array([-0.5 * sum([((xi - 0.83456 + i*0.1)/0.01)**2 for i, xi in enumerate(x)]) for x in z])
b = np.array([-0.5 * sum([((xi - 0.43456 - i*0.1)/0.01)**2 for i, xi in enumerate(x)]) for x in z])
return np.logaddexp(a, b)
def transform(x):
return 10. * x - 5.
import string
paramnames = list(string.ascii_lowercase)[:args.x_dim]
sampler = NestedSampler(paramnames, loglike, transform=transform,
vectorized=True, log_dir=args.log_dir)
sampler.run(log_interval=20, num_live_points=args.num_live_points)
sampler.plot()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=2,
help="Dimensionality")
parser.add_argument('--train_iters', type=int, default=50,
help="number of train iters")
parser.add_argument("--mcmc_steps", type=int, default=0)
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('-use_gpu', action='store_true')
parser.add_argument('--flow', type=str, default='nvp')
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument("--test_samples", type=int, default=0)
parser.add_argument("--test_mcmc_steps", type=int, default=1000)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--num_slow', type=int, default=0)
parser.add_argument('--log_dir', type=str, default='logs/rosenbrock')
args = parser.parse_args()
main(args)
| 2,192 | 38.872727 | 107 | py |
UltraNest | UltraNest-master/examples/testrosenbrock.py | import argparse
import numpy as np
def main(args):
ndim = args.x_dim
adaptive_nsteps = args.adapt_steps
if adaptive_nsteps is None:
adaptive_nsteps = False
def loglike(theta):
a = theta[:,:-1]
b = theta[:,1:]
return -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum(axis=1)
def transform(u):
return u * 20 - 10
def transform_loglike_gradient(u):
theta = u * 20 - 10
a = theta[:-1]
b = theta[1:]
grad = theta.copy()
L = -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum()
for i in range(ndim):
a = theta[i]
if i < ndim-1:
b = theta[i+1]
grad[i] = -2*(-400 * a * (b - a**2) - 2 * (1 - a))
if i > 0:
c = theta[i-1]
grad[i] += - 400 * (a - c**2)
prior_factor = 20
return theta, L, grad * prior_factor
def gradient(u):
theta = u * 20 - 10
grad = theta.copy()
for i in range(ndim):
a = theta[i]
if i < ndim-1:
b = theta[i+1]
grad[i] = -2*(-400 * a * (b - a**2) - 2 * (1 - a))
if i > 0:
c = theta[i-1]
grad[i] += - 400 * (a - c**2)
prior_factor = 20
return grad * prior_factor
paramnames = ['param%d' % (i+1) for i in range(ndim)]
if args.pymultinest:
from pymultinest.solve import solve
def flat_loglike(theta):
return loglike(theta.reshape((1, -1)))
result = solve(LogLikelihood=flat_loglike, Prior=transform,
n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
verbose=True, resume=True, importance_nested_sampling=False)
print()
print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
for name, col in zip(paramnames, result['samples'].transpose()):
print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
elif args.reactive:
if args.slice:
log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
elif args.harm:
log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
elif args.aharm:
log_dir = args.log_dir + 'RNS-%dd-aharm%d' % (ndim, args.slice_steps)
elif args.dyhmc:
log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
elif args.dychmc:
log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
else:
log_dir = args.log_dir + 'RNS-%dd' % (ndim)
if adaptive_nsteps:
log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)
from ultranest import ReactiveNestedSampler
sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
log_dir=log_dir, resume=True,
vectorized=True)
if args.slice:
import ultranest.stepsampler
sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
log=open(log_dir + '/stepsampler.log', 'w'), max_nsteps=ndim)
if args.harm:
import ultranest.stepsampler
sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
log=open(log_dir + '/stepsampler.log', 'w'), max_nsteps=ndim)
if args.aharm:
import ultranest.stepsampler
sampler.stepsampler = ultranest.stepsampler.AHARMSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
log=open(log_dir + '/stepsampler.log', 'w'), max_nsteps=ndim)
if args.dyhmc:
import ultranest.dyhmc
from ultranest.utils import verify_gradient
verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps,
transform_loglike_gradient=transform_loglike_gradient, adaptive_nsteps=adaptive_nsteps)
if args.dychmc:
import ultranest.dychmc
from ultranest.utils import verify_gradient
verify_gradient(ndim, transform, loglike, gradient)
sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps,
transform=transform, loglike=loglike, gradient=gradient, adaptive_nsteps=adaptive_nsteps)
sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
sampler.print_results()
if sampler.stepsampler is not None:
sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
if ndim <= 20:
sampler.plot()
else:
from ultranest import NestedSampler
sampler = NestedSampler(paramnames, loglike, transform=transform,
num_live_points=args.num_live_points, vectorized=True,
log_dir=args.log_dir + '-%dd' % ndim, resume=True)
sampler.run()
sampler.print_results()
sampler.plot()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=2,
help="Dimensionality")
parser.add_argument("--num_live_points", type=int, default=400)
parser.add_argument('--log_dir', type=str, default='logs/rosen')
parser.add_argument('--pymultinest', action='store_true')
parser.add_argument('--reactive', action='store_true')
parser.add_argument('--slice', action='store_true')
parser.add_argument('--harm', action='store_true')
parser.add_argument('--aharm', action='store_true')
parser.add_argument('--dyhmc', action='store_true')
parser.add_argument('--dychmc', action='store_true')
parser.add_argument('--slice_steps', type=int, default=100)
parser.add_argument('--adapt_steps', type=str)
args = parser.parse_args()
main(args)
| 6,260 | 39.655844 | 136 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.