repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
ML-Doctor | ML-Doctor-main/demoloader/DCGAN.py | import torch.nn as nn
class Generator(nn.Module):
    """DCGAN generator: upsamples a latent vector of shape (nz, 1, 1) to an
    (nc, 64, 64) image in [-1, 1] via strided transposed convolutions."""

    def __init__(self, ngpu=1, nc=3, nz=100, ngf=64):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        blocks = [
            # latent z -> (ngf*8) x 4 x 4
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
        ]
        # three doubling stages: 4x4 -> 8x8 -> 16x16 -> 32x32
        widths = [ngf * 8, ngf * 4, ngf * 2, ngf]
        for c_in, c_out in zip(widths, widths[1:]):
            blocks += [
                nn.ConvTranspose2d(c_in, c_out, 4, 2, 1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True),
            ]
        # final stage: (ngf) x 32 x 32 -> (nc) x 64 x 64, squashed by Tanh
        blocks += [nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh()]
        self.main = nn.Sequential(*blocks)

    def forward(self, input):
        """Run the generator, optionally splitting the batch over multiple GPUs."""
        if input.is_cuda and self.ngpu > 1:
            return nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps an (nc, 64, 64) image to a scalar real/fake
    probability per sample."""

    def __init__(self, ngpu=1, nc=3, ndf=64):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        widths = [ndf, ndf * 2, ndf * 4, ndf * 8]
        blocks = [
            # (nc) x 64 x 64 -> (ndf) x 32 x 32; no BatchNorm on the first layer
            nn.Conv2d(nc, widths[0], 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # three halving stages: 32x32 -> 16x16 -> 8x8 -> 4x4
        for c_in, c_out in zip(widths, widths[1:]):
            blocks += [
                nn.Conv2d(c_in, c_out, 4, 2, 1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # (ndf*8) x 4 x 4 -> single probability via Sigmoid
        blocks += [nn.Conv2d(widths[-1], 1, 4, 1, 0, bias=False), nn.Sigmoid()]
        self.main = nn.Sequential(*blocks)

    def forward(self, input):
        """Return a 1-D tensor of per-sample probabilities."""
        if input.is_cuda and self.ngpu > 1:
            scores = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            scores = self.main(input)
        return scores.view(-1, 1).squeeze(1)
class FashionGenerator(nn.Module):
    """Fully-connected generator producing 1 x 64 x 64 images (Tanh output)
    from a 100-dimensional latent vector."""

    def __init__(self):
        super(FashionGenerator, self).__init__()
        latent_dim = 100
        img_dim = 64 * 64
        self.input = nn.Sequential(nn.Linear(latent_dim, 256), nn.ReLU())
        self.hidden1 = nn.Sequential(nn.Linear(256, 512), nn.ReLU())
        self.hidden2 = nn.Sequential(nn.Linear(512, 1024), nn.ReLU())
        self.output = nn.Sequential(nn.Linear(1024, img_dim), nn.Tanh())

    def forward(self, x):
        """Flatten latent codes to (-1, 100) and emit (N, 1, 64, 64) images."""
        out = x.view(-1, 100)
        for stage in (self.input, self.hidden1, self.hidden2, self.output):
            out = stage(out)
        return out.reshape(-1, 1, 64, 64)
class FashionDiscriminator(nn.Module):
    """Fully-connected discriminator scoring 64 x 64 grayscale images with a
    single Sigmoid probability."""

    def __init__(self):
        super(FashionDiscriminator, self).__init__()
        img_dim = 64 * 64
        score_dim = 1
        self.input = nn.Sequential(nn.Linear(img_dim, 1024), nn.ReLU(), nn.Dropout(0.2))
        self.hidden1 = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(), nn.Dropout(0.2))
        self.hidden2 = nn.Sequential(nn.Linear(512, 256), nn.ReLU(), nn.Dropout(0.2))
        self.output = nn.Sequential(nn.Linear(256, score_dim), nn.Sigmoid())

    def forward(self, x):
        """Flatten images to (N, 4096) and return (N, 1) probabilities."""
        out = x.reshape(-1, 64 * 64)
        for stage in (self.input, self.hidden1, self.hidden2, self.output):
            out = stage(out)
        return out
| 4,237 | 29.934307 | 82 | py |
ML-Doctor | ML-Doctor-main/utils/define_models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class attrinf_attack_model(nn.Module):
    """Linear attribute-inference head: flattens a hidden representation and
    maps it to `outputs` attribute logits."""

    def __init__(self, inputs, outputs):
        super(attrinf_attack_model, self).__init__()
        self.classifier = nn.Linear(inputs, outputs)

    def forward(self, x):
        flat = torch.flatten(x, 1)  # keep the batch dim, flatten the rest
        return self.classifier(flat)
class ShadowAttackModel(nn.Module):
    """Black-box membership-inference attack model: embeds the target's
    posterior vector and a scalar correctness indicator, concatenates both
    64-d embeddings and classifies member vs. non-member (2 logits)."""

    def __init__(self, class_num):
        super(ShadowAttackModel, self).__init__()
        self.Output_Component = self._branch(class_num)
        self.Prediction_Component = self._branch(1)
        # 64 + 64 concatenated embeddings -> 2 membership logits
        self.Encoder_Component = nn.Sequential(
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        )

    @staticmethod
    def _branch(in_features):
        """Two-layer embedding shared by both input branches."""
        return nn.Sequential(
            nn.Linear(in_features, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
        )

    def forward(self, output, prediction):
        out_emb = self.Output_Component(output)
        pred_emb = self.Prediction_Component(prediction)
        joint = torch.cat((out_emb, pred_emb), 1)
        return self.Encoder_Component(joint)
class PartialAttackModel(nn.Module):
    """Membership-inference attack model for the partial-knowledge setting.

    NOTE(review): structurally identical to ShadowAttackModel in this file —
    the two could share a common base class."""

    def __init__(self, class_num):
        super(PartialAttackModel, self).__init__()
        self.Output_Component = self._branch(class_num)
        self.Prediction_Component = self._branch(1)
        # 64 + 64 concatenated embeddings -> 2 membership logits
        self.Encoder_Component = nn.Sequential(
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        )

    @staticmethod
    def _branch(in_features):
        """Two-layer embedding shared by both input branches."""
        return nn.Sequential(
            nn.Linear(in_features, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
        )

    def forward(self, output, prediction):
        out_emb = self.Output_Component(output)
        pred_emb = self.Prediction_Component(prediction)
        joint = torch.cat((out_emb, pred_emb), 1)
        return self.Encoder_Component(joint)
class WhiteBoxAttackModel(nn.Module):
    """White-box membership-inference attack model combining four inputs:
    posterior vector (class_num), scalar loss, a (1, H, W) gradient map, and a
    one-hot label (class_num). `total` must equal the flattened size of the
    gradient map after the conv + 2x2 max-pool stage, i.e. (H//2) * (W//2)."""

    def __init__(self, class_num, total):
        super(WhiteBoxAttackModel, self).__init__()
        self.Output_Component = self._fc_branch(class_num)
        self.Loss_Component = self._fc_branch(1)
        # conv stage halves the gradient map, then an MLP embeds it to 64-d
        self.Gradient_Component = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Conv2d(1, 1, kernel_size=5, padding=2),
            nn.BatchNorm2d(1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Dropout(p=0.2),
            nn.Linear(total, 256),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
        )
        self.Label_Component = self._fc_branch(class_num)
        # 4 x 64 concatenated embeddings -> 2 membership logits
        self.Encoder_Component = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        )

    @staticmethod
    def _fc_branch(in_features):
        """Dropout + two-layer embedding used for the vector-valued inputs."""
        return nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(in_features, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
        )

    def forward(self, output, loss, gradient, label):
        out_emb = self.Output_Component(output)
        loss_emb = self.Loss_Component(loss)
        grad_emb = self.Gradient_Component(gradient)
        label_emb = self.Label_Component(label)
        joint = torch.cat((out_emb, loss_emb, grad_emb, label_emb), 1)
        return self.Encoder_Component(joint)
ML-Doctor | ML-Doctor-main/doctor/modsteal.py | import torch
import torch.nn.functional as F
from math import *
from tqdm import tqdm
class train_steal_model():
    """Model-stealing (extraction) attack trainer.

    Trains `attack_model` to imitate a frozen, pre-trained `target_model`:
    the attack model is fit on the target's softmax posteriors and evaluated
    both on ground-truth accuracy and on prediction agreement with the target.
    """

    def __init__(self, train_loader, test_loader, target_model, attack_model, TARGET_PATH, ATTACK_PATH, device, batch_size, loss, optimizer):
        self.device = device
        self.batch_size = batch_size
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.TARGET_PATH = TARGET_PATH
        # the victim model is loaded from disk and frozen in eval mode
        self.target_model = target_model.to(self.device)
        self.target_model.load_state_dict(torch.load(self.TARGET_PATH, map_location=self.device))
        self.target_model.eval()
        self.ATTACK_PATH = ATTACK_PATH
        self.attack_model = attack_model.to(self.device)
        self.criterion = loss
        self.optimizer = optimizer
        # NOTE(review): index/count/dataset are never used in this class —
        # candidates for removal.
        self.index = 0
        self.count = [0 for i in range(10)]
        self.dataset = []

    def train(self, train_set, train_out):
        """One pass over pre-computed (input batch, target output batch) pairs;
        fits the attack model on the given soft targets."""
        self.attack_model.train()
        for inputs, targets in tqdm(zip(train_set, train_out)):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.attack_model(inputs)
            outputs = F.softmax(outputs, dim=1)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()

    def train_with_same_distribution(self):
        """One epoch of stealing: queries the target on each training batch and
        fits the attack model to the target's softmax posterior.

        Prints train accuracy (vs. ground truth) and agreement (vs. the
        target's predicted labels)."""
        self.attack_model.train()
        train_loss = 0
        correct = 0
        total = 0
        correct_target = 0
        total_target = 0
        for inputs, targets in tqdm(self.train_loader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            # query the victim: hard labels for agreement, posteriors for the loss
            target_model_logit = self.target_model(inputs)
            _,target_model_output = target_model_logit.max(1)
            target_model_posterior = F.softmax(target_model_logit, dim=1)
            self.optimizer.zero_grad()
            outputs = self.attack_model(inputs)
            # the loss matches the attack model's raw logits to the target's posterior
            loss = self.criterion(outputs, target_model_posterior)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            total_target += targets.size(0)
            correct_target += predicted.eq(target_model_output).sum().item()
        print( 'Train Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))
        print( 'Train Agreement: %.3f%% (%d/%d)' % (100.*correct_target/total_target, correct_target, total_target))

    def test(self):
        """Evaluate the stolen model: returns (test accuracy, agreement with the
        target's predictions), both in [0, 1]."""
        self.attack_model.eval()
        correct = 0
        target_correct = 0
        total = 0
        agreement_correct = 0
        agreement_total = 0
        with torch.no_grad():
            for inputs, targets in self.test_loader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.attack_model(inputs)
                _, predicted = outputs.max(1)
                target_model_logit = self.target_model(inputs)
                _,target_predicted = target_model_logit.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                target_correct += target_predicted.eq(targets).sum().item()
                # NOTE(review): this second target forward duplicates
                # target_model_logit computed above — redundant work.
                output_target = self.target_model(inputs)
                _, predicted_target = output_target.max(1)
                agreement_total += targets.size(0)
                agreement_correct += predicted.eq(predicted_target).sum().item()
        print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))
        print( 'Target Test Acc: %.3f%% (%d/%d)' % (100.*target_correct/total, target_correct, total))
        print( 'Test Agreement: %.3f%% (%d/%d)' % (100.*agreement_correct/agreement_total, agreement_correct, agreement_total))
        acc_test = correct/total
        agreemenet_test = agreement_correct / agreement_total
        return acc_test, agreemenet_test

    def saveModel(self):
        """Persist the stolen (attack) model's weights to ATTACK_PATH."""
        torch.save(self.attack_model.state_dict(), self.ATTACK_PATH)
| 4,341 | 33.188976 | 141 | py |
ML-Doctor | ML-Doctor-main/doctor/attrinf.py | import torch
import pickle
import torch.nn as nn
import torch.optim as optim
from utils.define_models import *
from sklearn.metrics import f1_score
class attack_training():
    """Attribute-inference attack trainer.

    Loads a frozen target model, taps the activation of its second-to-last
    weight layer via a forward hook, and trains a linear attack head
    (attrinf_attack_model) to predict the sensitive attribute (the second
    element of each label pair) from that activation.
    """

    def __init__(self, device, attack_trainloader, attack_testloader, target_model, TARGET_PATH, ATTACK_PATH):
        self.device = device
        self.TARGET_PATH = TARGET_PATH
        self.ATTACK_PATH = ATTACK_PATH
        # the victim model is loaded from disk and frozen in eval mode
        self.target_model = target_model.to(self.device)
        self.target_model.load_state_dict(torch.load(self.TARGET_PATH))
        self.target_model.eval()
        # built lazily by init_attack_model once the activation size is known
        self.attack_model = None
        self.attack_trainloader = attack_trainloader
        self.attack_testloader = attack_testloader
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = None
        # "binary" or "macro" — the averaging mode handed to sklearn f1_score
        self.dataset_type = None

    def _get_activation(self, name, activation):
        """Forward-hook factory: stores the hooked module's (detached) output
        in `activation` under key `name`."""
        def hook(model, input, output):
            activation[name] = output.detach()
        return hook

    def init_attack_model(self, size, output_classes):
        """Build the attack head; probes the target with a random input of
        shape `size` to discover the tapped activation's flattened width."""
        x = torch.rand(size).to(self.device)
        input_classes = self.get_middle_output(x).flatten().shape[0]
        self.attack_model = attrinf_attack_model(inputs=input_classes, outputs=output_classes)
        self.attack_model.to(self.device)
        self.optimizer = optim.Adam(self.attack_model.parameters(), lr=1e-3)
        # binary F1 for two-class attributes, macro-averaged otherwise
        if output_classes == 2:
            self.dataset_type = "binary"
        else:
            self.dataset_type = "macro"

    def get_middle_output(self, x):
        """Run `x` through the target and return the activation of its
        second-to-last weight-bearing layer.

        Assumes the target stores its layers in indexable containers
        (e.g. nn.Sequential attributes) so that parameter names look like
        "<attr>.<index>.weight".
        """
        temp = []
        for name, _ in self.target_model.named_parameters():
            if "weight" in name:
                temp.append(name)
        # BUG FIX: temp[-2] needs at least two weight layers; the old guard
        # (1 > len(temp)) let a single-layer target through and crashed with a
        # raw IndexError on the indexing below.
        if len(temp) < 2:
            raise IndexError('layer is out of range')
        name = temp[-2].split('.')
        var = eval('self.target_model.' + name[0])
        out = {}
        handle = var[int(name[1])].register_forward_hook(self._get_activation(name[1], out))
        _ = self.target_model(x)
        # BUG FIX: remove the hook so repeated calls (one per batch) do not
        # pile up handlers on the same layer.
        handle.remove()
        return out[name[1]]

    # Training
    def train(self, epoch):
        """One training epoch; when `epoch` is truthy, also computes the F1
        score and dumps (ground truth, predictions, probabilities) to disk.

        Returns a list: [f1 (only when `epoch`), accuracy]."""
        self.attack_model.train()
        train_loss = 0
        correct = 0
        total = 0
        final_result = []
        final_gndtrth = []
        final_predict = []
        final_probabe = []
        # labels arrive as [task_label, attribute]; only the attribute is used
        for batch_idx, (inputs, [_, targets]) in enumerate(self.attack_trainloader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            oracles = self.get_middle_output(inputs)
            outputs = self.attack_model(oracles)
            # NOTE(review): CrossEntropyLoss applies log-softmax internally, so
            # feeding it softmax probabilities double-softmaxes; kept as-is for
            # parity with test().
            outputs = F.softmax(outputs, dim=1)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            if epoch:
                final_gndtrth.append(targets)
                final_predict.append(predicted)
                final_probabe.append(outputs[:, 1])
        if epoch:
            final_gndtrth = torch.cat(final_gndtrth, dim=0).cpu().detach().numpy()
            final_predict = torch.cat(final_predict, dim=0).cpu().detach().numpy()
            final_probabe = torch.cat(final_probabe, dim=0).cpu().detach().numpy()
            train_f1_score = f1_score(final_gndtrth, final_predict, average=self.dataset_type)
            final_result.append(train_f1_score)
            with open(self.ATTACK_PATH + "_attrinf_train.p", "wb") as f:
                pickle.dump((final_gndtrth, final_predict, final_probabe), f)
            # messages fixed: this branch handles the *train* split
            print("Saved Attack Train Ground Truth and Predict Sets")
            print("Train F1: %f" % (train_f1_score))
        final_result.append(1.*correct/total)
        print( 'Train Acc: %.3f%% (%d/%d)' % (100.*correct/(1.0*total), correct, total))
        return final_result

    def test(self, epoch):
        """One evaluation pass; when `epoch` is truthy, also computes the F1
        score and dumps (ground truth, predictions, probabilities) to disk.

        Returns a list: [f1 (only when `epoch`), accuracy]."""
        self.attack_model.eval()
        correct = 0
        total = 0
        final_result = []
        final_gndtrth = []
        final_predict = []
        final_probabe = []
        with torch.no_grad():
            for inputs, [_, targets] in self.attack_testloader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                oracles = self.get_middle_output(inputs)
                outputs = self.attack_model(oracles)
                outputs = F.softmax(outputs, dim=1)
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                if epoch:
                    final_gndtrth.append(targets)
                    final_predict.append(predicted)
                    final_probabe.append(outputs[:, 1])
        if epoch:
            final_gndtrth = torch.cat(final_gndtrth, dim=0).cpu().numpy()
            final_predict = torch.cat(final_predict, dim=0).cpu().numpy()
            final_probabe = torch.cat(final_probabe, dim=0).cpu().numpy()
            test_f1_score = f1_score(final_gndtrth, final_predict, average=self.dataset_type)
            final_result.append(test_f1_score)
            with open(self.ATTACK_PATH + "_attrinf_test.p", "wb") as f:
                pickle.dump((final_gndtrth, final_predict, final_probabe), f)
            print("Saved Attack Test Ground Truth and Predict Sets")
            print("Test F1: %f" % (test_f1_score))
        final_result.append(1.*correct/total)
        print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/(1.0*total), correct, total))
        return final_result

    def saveModel(self):
        """Persist the attack head's weights next to ATTACK_PATH."""
        torch.save(self.attack_model.state_dict(), self.ATTACK_PATH + "_attrinf_attack_model.pth")
def train_attack_model(TARGET_PATH, ATTACK_PATH, output_classes, device, target_model, train_loader, test_loader, size):
    """Train the attribute-inference attack model for 100 epochs and return
    the (train, test) metrics of the final epoch."""
    attack = attack_training(device, train_loader, test_loader, target_model, TARGET_PATH, ATTACK_PATH)
    attack.init_attack_model(size, output_classes)
    total_epochs = 100
    for epoch in range(total_epochs):
        # only the last epoch computes F1 and dumps the prediction sets
        flag = int(epoch == total_epochs - 1)
        print(f"<======================= Epoch {epoch + 1} =======================>")
        print("attack training")
        acc_train = attack.train(flag)
        print("attack testing")
        acc_test = attack.test(flag)
    attack.saveModel()
    print("Saved Attack Model")
    print("Finished!!!")
    return acc_train, acc_test
ML-Doctor | ML-Doctor-main/doctor/modinv.py | import time
import torch
import random
import numpy as np
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
class ccs_inversion(object):
    '''
    Model inversion is a kind of data reconstruct attack.
    This class we implement the attack on neural network,
    the attack goal is to generate data that is close to original data distribution.
    This attack was first described in Fredrikson's paper (Algorithm 1):
    "Model Inversion Attacks that Exploit Confidence Information and Basic Countermeasures" (CCS2015)
    -----------------------------NOTICE---------------------------
    If the model's output layer doesn't contain Softmax layer, please add it manually.
    And parameters will influence the quality of the reconstructed data significantly.
    --------------------------------------------------------------
    Args:
    ------------------------
    :param target_model: the target model which we are trying to reconstruct its training dataset
    :param input_size: the size of the model's input
    :param output_size: the size of the model's output
    :param target_label: the reconstructed output is belong to this class
    :param param_alpha: the number of iteration round
    :param param_beta, gamma, lambda: the hyperparameters in paper
    '''

    def __init__(self, target_model, input_size, output_size, target_label, param_alpha, param_beta, param_gamma, param_lambda, device):
        self.target_model = target_model
        self.input_size = input_size
        self.output_size = output_size
        self.target_label = target_label
        self.param_alpha = param_alpha
        self.param_beta = param_beta
        self.param_gamma = param_gamma
        self.param_lambda = param_lambda
        self.device = device
        # the target is frozen: only the input is optimized
        self.target_model.to(self.device).eval()

    def model_invert(self):
        """Gradient descent on the input (Fredrikson Algorithm 1): start from a
        zero image and minimize 1 - P(target_label | x); returns the candidate
        with the lowest observed cost."""
        current_x = []
        cost_x = []
        # x_0 = all-zeros input
        current_x.append(Variable(torch.from_numpy(np.zeros(self.input_size, dtype=np.uint8))).float().to(self.device))
        for i in range(self.param_alpha):
            cost_x.append(self.invert_cost(current_x[i]).to(self.device))
            cost_x[i].backward()
            # plain gradient step with learning rate param_lambda
            current_x.append((current_x[i] - self.param_lambda * current_x[i].grad).data)
            if self.invert_cost(current_x[i + 1]) <= self.param_gamma:
                # cost target reached -> early stop
                print('Target cost value achieved')
                break
            elif i >= self.param_beta and self.invert_cost(current_x[i + 1]) >= max(cost_x[self.param_beta:i + 1]):
                # no improvement over the last beta rounds -> give up
                print('Exceed beta')
                break
        i = cost_x.index(min(cost_x))
        return current_x[i]

    def invert_cost(self, input_x):
        """Cost = 1 - confidence of the target label (assumes softmax output)."""
        return 1 - self.target_model(input_x.requires_grad_(True))[0][self.target_label]

    def reverse_mse(self, ori_dataset):
        '''
        output the average MSE value of different classes
        :param ori_dataset: the data used to train the target model, please make sure setting the batch size as 1.
        :return: MSE value
        '''
        # invert one representative input per class
        reverse_data = []
        for i in range(self.output_size):
            self.target_label = i
            a = self.model_invert()
            reverse_data.append(a)
        # per-class mean image of the original training data
        class_avg = [Variable(torch.from_numpy(np.zeros(self.input_size, dtype=np.uint8))).float().to(self.device) for _ in range(self.output_size)]
        class_mse = [0 for _ in range(self.output_size)]
        class_count = [0 for _ in range(self.output_size)]
        for x, y in ori_dataset:
            x, y = x.to(self.device), y.to(self.device)
            class_avg[y] = class_avg[y] + x
            class_count[y] = class_count[y] + 1
        for i in range(self.output_size):
            class_mse[i] = self.figure_mse(class_avg[i] / class_count[i], (reverse_data[i]))
        all_class_avg_mse = 0
        for i in range(self.output_size):
            all_class_avg_mse = all_class_avg_mse + class_mse[i]
        return all_class_avg_mse / self.output_size

    def figure_mse(self, recover_fig, ori_fig):
        '''
        :param recover_fig: figure recovered by model inversion attack, type:
        :param ori_fig: figure in the training dataset
        :return: MSE value of these two figures
        '''
        diff = nn.MSELoss()
        return diff(recover_fig, ori_fig)
def revealer_inversion(G, D, T, E, iden, device, noise=100, lr=1e-3, momentum=0.9, lamda=100, iter_times=1500, clip_range=1):
    '''
    This model inversion attack was proposed by Zhang et al. in CVPR20
    "The Secret Revealer: Generative Model-Inversion Attacks Against Deep Neural Networks"

    G/D: pretrained GAN generator/discriminator (the image prior),
    T: target model under attack, E: evaluation classifier,
    iden: tensor of target identity labels (one per sample in the batch).
    Runs 10 random restarts of momentum gradient descent on the latent code z,
    minimizing -D(G(z)) + lamda * CE(T(G(z)), iden); returns the attack
    accuracy as judged by E, averaged over all restarts.
    '''
    iden = iden.view(-1).long().to(device)
    G, D, T, E = G.to(device), D.to(device), T.to(device), E.to(device)
    criterion = nn.CrossEntropyLoss().to(device)
    bs = iden.shape[0]
    G.eval()
    D.eval()
    T.eval()
    # per-sample best target confidence / identity / latent code across restarts
    max_score = torch.zeros(bs)
    max_iden = torch.zeros(bs)
    z_hat = torch.zeros(bs, noise, 1, 1)
    cnt = 0
    for random_seed_sudo in range(10):
        # reseed everything so each restart explores a different z
        tf = time.time()
        random_seed = random.randint(0, 200)
        torch.manual_seed(random_seed)
        torch.cuda.manual_seed(random_seed)
        np.random.seed(random_seed)
        random.seed(random_seed)
        z = torch.randn(bs, noise, 1, 1).to(device).float()
        z.requires_grad = True
        v = torch.zeros(bs, noise, 1, 1).to(device).float()  # momentum buffer
        for i in range(iter_times):
            fake = G(z)
            label = D(fake)
            out = T(fake)
            if z.grad is not None:
                z.grad.data.zero_()
            # prior term keeps G(z) on the image manifold; identity term pushes
            # T's prediction toward the target label
            Prior_Loss = - label.mean()
            Iden_Loss = criterion(out, iden)
            Total_Loss = Prior_Loss + lamda * Iden_Loss
            Total_Loss.backward()
            # Nesterov-style momentum update on z, clamped to the prior's range
            v_prev = v.clone()
            gradient = z.grad.data
            v = momentum * v - lr * gradient
            z = z + ( - momentum * v_prev + (1 + momentum) * v)
            z = torch.clamp(z.detach(), -clip_range, clip_range).float()
            z.requires_grad = True
            # NOTE(review): tf / acc / *_val below are computed but never used —
            # presumably left over from progress logging.
            Prior_Loss_val = Prior_Loss.item()
            Iden_Loss_val = Iden_Loss.item()
            if (i + 1) % 300 == 0:
                fake_img = G(z.detach())
                eval_prob = E(fake_img)
                eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
                acc = iden.eq(eval_iden.long()).sum().item() * 1.0 / bs
        # after this restart: score the final images and keep per-sample bests
        fake = G(z)
        score = T(fake)
        eval_prob = E(fake)
        _, eval_iden = torch.max(eval_prob, dim=1)
        for i in range(bs):
            _, gtl = torch.max(score, 1)
            gt = gtl[i].item()
            if score[i, gt].item() > max_score[i].item():
                max_score[i] = score[i, gt]
                max_iden[i] = eval_iden[i]
                z_hat[i, :] = z[i, :]
            # count a hit when the evaluator agrees with T's top class
            if eval_iden[i].item() == gt:
                cnt += 1
    print("Acc:{:.2f}\t".format(cnt * 1.0 / (bs*10)))
    return cnt * 1.0 / (bs*10)
def load_data(PATH_target, PATH_evaluation, target_model, evaluate_model):
    '''
    Load trained weights into the target and evaluation models and return them.

    The evaluation classifier predicts the identity from a reconstructed image.
    If it achieves high accuracy, the reconstruction is considered to expose
    private information about the target label. It should differ from the
    target network (reconstructions may overfit the target's features while
    being semantically meaningless) and should itself be highly performant.
    '''
    for model, path in ((target_model, PATH_target), (evaluate_model, PATH_evaluation)):
        model.load_state_dict(torch.load(path))
    print("Finished Loading")
    return target_model, evaluate_model
def prepare_GAN(data_type, discriminator, generator, PATH_1, PATH_2):
    """Load pretrained GAN weights and build the 10-entry identity-label
    vector used by the revealer attack.

    The ten slots cycle through the dataset's class count; unknown dataset
    names leave the labels at zero."""
    discriminator.load_state_dict(torch.load(PATH_1))
    generator.load_state_dict(torch.load(PATH_2))
    iden = torch.zeros(10)
    # classes per dataset that the ten identity slots cycle through
    class_mod = {'stl10': 10, 'fmnist': 10, 'utkface': 4, 'celeba': 8}.get(data_type.lower())
    if class_mod is not None:
        for slot in range(10):
            iden[slot] = slot % class_mod
    return discriminator, generator, iden
ML-Doctor | ML-Doctor-main/doctor/meminf.py | import os
import glob
import torch
import pickle
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
np.set_printoptions(threshold=np.inf)
from opacus import PrivacyEngine
from torch.optim import lr_scheduler
from sklearn.metrics import f1_score, roc_auc_score
def weights_init(m):
    """Module initializer for `nn.Module.apply`: conv weights ~ N(0, 1) with
    zero bias, linear layers Xavier-normal with zero bias; other module types
    are left untouched."""
    if isinstance(m, nn.Conv2d):
        nn.init.normal_(m.weight.data)
        m.bias.data.fill_(0)
        return
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)
        nn.init.constant_(m.bias, 0)
class shadow():
    """Trainer for the shadow/target model used by membership-inference attacks.

    When `use_DP` is set, the model/optimizer/dataloader are wrapped by Opacus
    DP-SGD: `noise` is the noise multiplier, `norm` the per-sample gradient
    clipping norm, and `delta` the delta used when reporting epsilon.
    """

    def __init__(self, trainloader, testloader, model, device, use_DP, noise, norm, loss, optimizer, delta):
        self.delta = delta
        self.use_DP = use_DP
        self.device = device
        self.model = model.to(self.device)
        self.trainloader = trainloader
        self.testloader = testloader
        self.criterion = loss
        self.optimizer = optimizer
        self.noise_multiplier, self.max_grad_norm = noise, norm
        if self.use_DP:
            # Opacus rewraps all three so per-sample gradients are clipped
            # and noised during training
            self.privacy_engine = PrivacyEngine()
            self.model, self.optimizer, self.trainloader = self.privacy_engine.make_private(
                module=self.model,
                optimizer=self.optimizer,
                data_loader=self.trainloader,
                noise_multiplier=self.noise_multiplier,
                max_grad_norm=self.max_grad_norm,
            )
            print( 'noise_multiplier: %.3f | max_grad_norm: %.3f' % (self.noise_multiplier, self.max_grad_norm))
        # LR decays by 0.1 at epochs 50 and 100
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, [50, 100], 0.1)

    # Training
    def train(self):
        """Run one training epoch; returns the train accuracy in [0, 1].
        Under DP, also prints the epsilon spent so far."""
        self.model.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(self.trainloader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        if self.use_DP:
            epsilon = self.privacy_engine.accountant.get_epsilon(delta=self.delta)
            # NOTE(review): the message hard-codes delta = 1e-5 even though
            # self.delta is configurable — presumably should print self.delta.
            print("\u03B5: %.3f \u03B4: 1e-5" % (epsilon))
        self.scheduler.step()
        # NOTE(review): mean loss divides by the last batch index rather than
        # the batch count (batch_idx + 1), so the printed loss is slightly off.
        print( 'Train Acc: %.3f%% (%d/%d) | Loss: %.3f' % (100.*correct/total, correct, total, 1.*train_loss/batch_idx))
        return 1.*correct/total

    def saveModel(self, path):
        """Persist the model's weights to `path`."""
        torch.save(self.model.state_dict(), path)

    def get_noise_norm(self):
        """Return the DP settings as (noise_multiplier, max_grad_norm)."""
        return self.noise_multiplier, self.max_grad_norm

    def test(self):
        """Evaluate on the test loader; returns the test accuracy in [0, 1]."""
        self.model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, targets in self.testloader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))
        return 1.*correct/total
class distillation_training():
    """Knowledge-distillation trainer: fits `model` (the student) against a
    frozen `teacher` loaded from PATH, blending soft teacher targets at
    temperature T with hard-label cross-entropy via mixing weight alpha.
    """

    def __init__(self, PATH, trainloader, testloader, model, teacher, device, optimizer, T, alpha):
        self.device = device
        self.model = model.to(self.device)
        self.trainloader = trainloader
        self.testloader = testloader
        self.PATH = PATH
        # teacher weights come from disk; the teacher is frozen in eval mode
        self.teacher = teacher.to(self.device)
        self.teacher.load_state_dict(torch.load(self.PATH))
        self.teacher.eval()
        self.criterion = nn.KLDivLoss(reduction='batchmean')
        self.optimizer = optimizer
        # LR decays by 0.1 at epochs 50 and 100
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, [50, 100], 0.1)
        self.T = T
        self.alpha = alpha

    def distillation_loss(self, y, labels, teacher_scores, T, alpha):
        """Hinton distillation loss:
        KL(log_softmax(y/T) || softmax(teacher/T)) * T^2 * alpha
        + cross_entropy(y, labels) * (1 - alpha)."""
        loss = self.criterion(F.log_softmax(y/T, dim=1), F.softmax(teacher_scores/T, dim=1))
        loss = loss * (T*T * alpha) + F.cross_entropy(y, labels) * (1. - alpha)
        return loss

    def train(self):
        """One training epoch; returns the train accuracy in [0, 1].

        The loader yields a two-element label list; only the first entry
        (the class label) is used here."""
        self.model.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, [targets, _]) in enumerate(self.trainloader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            # teacher logits serve only as soft targets — no gradient needed
            teacher_output = self.teacher(inputs)
            teacher_output = teacher_output.detach()
            loss = self.distillation_loss(outputs, targets, teacher_output, T=self.T, alpha=self.alpha)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        self.scheduler.step()
        # NOTE(review): mean loss divides by the last batch index rather than
        # the batch count (batch_idx + 1), so the printed loss is slightly off.
        print( 'Train Acc: %.3f%% (%d/%d) | Loss: %.3f' % (100.*correct/total, correct, total, 1.*train_loss/batch_idx))
        return 1.*correct/total

    def saveModel(self, path):
        """Persist the student model's weights to `path`."""
        torch.save(self.model.state_dict(), path)

    def test(self):
        """Evaluate the student on the test loader; returns accuracy in [0, 1]."""
        self.model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, [targets, _] in self.testloader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.model(inputs)
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))
        return 1.*correct/total
class attack_for_blackbox():
def __init__(self, SHADOW_PATH, TARGET_PATH, ATTACK_SETS, attack_train_loader, attack_test_loader, target_model, shadow_model, attack_model, device):
self.device = device
self.TARGET_PATH = TARGET_PATH
self.SHADOW_PATH = SHADOW_PATH
self.ATTACK_SETS = ATTACK_SETS
self.target_model = target_model.to(self.device)
self.shadow_model = shadow_model.to(self.device)
self.target_model.load_state_dict(torch.load(self.TARGET_PATH))
self.shadow_model.load_state_dict(torch.load(self.SHADOW_PATH))
self.target_model.eval()
self.shadow_model.eval()
self.attack_train_loader = attack_train_loader
self.attack_test_loader = attack_test_loader
self.attack_model = attack_model.to(self.device)
torch.manual_seed(0)
self.attack_model.apply(weights_init)
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.attack_model.parameters(), lr=1e-5)
def _get_data(self, model, inputs, targets):
result = model(inputs)
output, _ = torch.sort(result, descending=True)
# results = F.softmax(results[:,:5], dim=1)
_, predicts = result.max(1)
prediction = predicts.eq(targets).float()
# prediction = []
# for predict in predicts:
# prediction.append([1,] if predict else [0,])
# prediction = torch.Tensor(prediction)
# final_inputs = torch.cat((results, prediction), 1)
# print(final_inputs.shape)
return output, prediction.unsqueeze(-1)
def prepare_dataset(self):
with open(self.ATTACK_SETS + "train.p", "wb") as f:
for inputs, targets, members in self.attack_train_loader:
inputs, targets = inputs.to(self.device), targets.to(self.device)
output, prediction = self._get_data(self.shadow_model, inputs, targets)
# output = output.cpu().detach().numpy()
pickle.dump((output, prediction, members), f)
print("Finished Saving Train Dataset")
with open(self.ATTACK_SETS + "test.p", "wb") as f:
for inputs, targets, members in self.attack_test_loader:
inputs, targets = inputs.to(self.device), targets.to(self.device)
output, prediction = self._get_data(self.target_model, inputs, targets)
# output = output.cpu().detach().numpy()
pickle.dump((output, prediction, members), f)
print("Finished Saving Test Dataset")
def train(self, epoch, result_path):
self.attack_model.train()
batch_idx = 1
train_loss = 0
correct = 0
total = 0
final_train_gndtrth = []
final_train_predict = []
final_train_probabe = []
final_result = []
with open(self.ATTACK_SETS + "train.p", "rb") as f:
while(True):
try:
output, prediction, members = pickle.load(f)
output, prediction, members = output.to(self.device), prediction.to(self.device), members.to(self.device)
results = self.attack_model(output, prediction)
results = F.softmax(results, dim=1)
losses = self.criterion(results, members)
losses.backward()
self.optimizer.step()
train_loss += losses.item()
_, predicted = results.max(1)
total += members.size(0)
correct += predicted.eq(members).sum().item()
if epoch:
final_train_gndtrth.append(members)
final_train_predict.append(predicted)
final_train_probabe.append(results[:, 1])
batch_idx += 1
except EOFError:
break
if epoch:
final_train_gndtrth = torch.cat(final_train_gndtrth, dim=0).cpu().detach().numpy()
final_train_predict = torch.cat(final_train_predict, dim=0).cpu().detach().numpy()
final_train_probabe = torch.cat(final_train_probabe, dim=0).cpu().detach().numpy()
train_f1_score = f1_score(final_train_gndtrth, final_train_predict)
train_roc_auc_score = roc_auc_score(final_train_gndtrth, final_train_probabe)
final_result.append(train_f1_score)
final_result.append(train_roc_auc_score)
with open(result_path, "wb") as f:
pickle.dump((final_train_gndtrth, final_train_predict, final_train_probabe), f)
print("Saved Attack Train Ground Truth and Predict Sets")
print("Train F1: %f\nAUC: %f" % (train_f1_score, train_roc_auc_score))
final_result.append(1.*correct/total)
print( 'Train Acc: %.3f%% (%d/%d) | Loss: %.3f' % (100.*correct/total, correct, total, 1.*train_loss/batch_idx))
return final_result
def test(self, epoch, result_path):
    """Evaluate the black-box attack model on the cached attack test set.

    When ``epoch`` is truthy (final round), additionally computes F1/AUC and
    dumps ground truth/predictions/probabilities to ``result_path``.
    Returns [f1, auc, accuracy] in that case, otherwise [accuracy].
    """
    self.attack_model.eval()
    batch_idx = 1
    correct = 0
    total = 0

    final_test_gndtrth = []
    final_test_predict = []
    final_test_probabe = []
    final_result = []

    with torch.no_grad():
        with open(self.ATTACK_SETS + "test.p", "rb") as f:
            while True:
                # One pickled batch per record; stop at end of file.
                try:
                    output, prediction, members = pickle.load(f)
                except EOFError:
                    break
                output = output.to(self.device)
                prediction = prediction.to(self.device)
                members = members.to(self.device)

                results = self.attack_model(output, prediction)
                _, predicted = results.max(1)
                total += members.size(0)
                correct += predicted.eq(members).sum().item()

                # Probabilities for AUC; column 1 is P(member).
                results = F.softmax(results, dim=1)

                if epoch:
                    final_test_gndtrth.append(members)
                    final_test_predict.append(predicted)
                    final_test_probabe.append(results[:, 1])

                batch_idx += 1

    if epoch:
        final_test_gndtrth = torch.cat(final_test_gndtrth, dim=0).cpu().numpy()
        final_test_predict = torch.cat(final_test_predict, dim=0).cpu().numpy()
        final_test_probabe = torch.cat(final_test_probabe, dim=0).cpu().numpy()

        test_f1_score = f1_score(final_test_gndtrth, final_test_predict)
        test_roc_auc_score = roc_auc_score(final_test_gndtrth, final_test_probabe)

        final_result.append(test_f1_score)
        final_result.append(test_roc_auc_score)

        with open(result_path, "wb") as f:
            pickle.dump((final_test_gndtrth, final_test_predict, final_test_probabe), f)
        print("Saved Attack Test Ground Truth and Predict Sets")
        print("Test F1: %f\nAUC: %f" % (test_f1_score, test_roc_auc_score))

    final_result.append(1.*correct/total)
    print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/(1.0*total), correct, total))

    return final_result
def delete_pickle(self):
    """Remove any cached attack train/test pickle files for this prefix."""
    for split in ("train.p", "test.p"):
        for stale_file in glob.glob(self.ATTACK_SETS + split):
            os.remove(stale_file)
def saveModel(self, path):
    """Persist the attack model's parameters (state_dict) to ``path``."""
    state = self.attack_model.state_dict()
    torch.save(state, path)
class attack_for_whitebox():
    """White-box membership inference attack.

    For each sample the attack model receives four features extracted from
    the attacked network: sorted posteriors, the per-sample loss, the
    gradient of that loss w.r.t. the deepest 'weight' parameter, and a
    one-hot label. Features from the shadow model form the attack train set;
    features from the target model form the attack test set. Both sets are
    streamed to/from pickle files under the ATTACK_SETS prefix.
    """

    def __init__(self, TARGET_PATH, SHADOW_PATH, ATTACK_SETS, attack_train_loader, attack_test_loader, target_model, shadow_model, attack_model, device, class_num):
        """Load frozen target/shadow models and initialize the attack model."""
        self.device = device
        self.class_num = class_num
        self.ATTACK_SETS = ATTACK_SETS  # path prefix for cached feature pickles

        self.TARGET_PATH = TARGET_PATH
        self.target_model = target_model.to(self.device)
        self.target_model.load_state_dict(torch.load(self.TARGET_PATH))
        self.target_model.eval()

        self.SHADOW_PATH = SHADOW_PATH
        self.shadow_model = shadow_model.to(self.device)
        self.shadow_model.load_state_dict(torch.load(self.SHADOW_PATH))
        self.shadow_model.eval()

        self.attack_train_loader = attack_train_loader
        self.attack_test_loader = attack_test_loader

        self.attack_model = attack_model.to(self.device)
        torch.manual_seed(0)  # deterministic attack-model initialization
        self.attack_model.apply(weights_init)

        # reduction='none' keeps a per-sample loss — itself an attack feature.
        self.target_criterion = nn.CrossEntropyLoss(reduction='none')
        self.attack_criterion = nn.CrossEntropyLoss()
        #self.optimizer = optim.SGD(self.attack_model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
        self.optimizer = optim.Adam(self.attack_model.parameters(), lr=1e-5)

        self.attack_train_data = None
        self.attack_test_data = None

    def _get_data(self, model, inputs, targets):
        """Extract the white-box attack features for one batch.

        Returns (sorted posteriors, per-sample losses, per-sample gradients of
        the deepest 'weight' parameter, one-hot labels).
        """
        results = model(inputs)
        # outputs = F.softmax(outputs, dim=1)
        losses = self.target_criterion(results, targets)

        gradients = []

        for loss in losses:
            # One backward pass per sample; retain_graph keeps the shared
            # graph alive for the next sample's backward call.
            loss.backward(retain_graph=True)

            gradient_list = reversed(list(model.named_parameters()))

            for name, parameter in gradient_list:
                if 'weight' in name:
                    # NOTE(review): parameter.grad is never zeroed between these
                    # per-sample backward calls, so the extracted gradient is
                    # cumulative over the batch — confirm this is intended.
                    gradient = parameter.grad.clone() # [column[:, None], row].resize_(100,100)
                    gradient = gradient.unsqueeze_(0)
                    gradients.append(gradient.unsqueeze_(0))
                    break

        # One-hot encode the targets for the attack model's label input.
        labels = []
        for num in targets:
            label = [0 for i in range(self.class_num)]
            label[num.item()] = 1
            labels.append(label)

        gradients = torch.cat(gradients, dim=0)
        losses = losses.unsqueeze_(1).detach()
        # Sort posteriors so the feature is invariant to class ordering.
        outputs, _ = torch.sort(results, descending=True)
        labels = torch.Tensor(labels)

        return outputs, losses, gradients, labels

    def prepare_dataset(self):
        """Stream attack features to pickle files: shadow-model features for
        the train set, target-model features for the test set."""
        with open(self.ATTACK_SETS + "train.p", "wb") as f:
            for inputs, targets, members in self.attack_train_loader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                output, loss, gradient, label = self._get_data(self.shadow_model, inputs, targets)

                pickle.dump((output, loss, gradient, label, members), f)

        print("Finished Saving Train Dataset")

        with open(self.ATTACK_SETS + "test.p", "wb") as f:
            for inputs, targets, members in self.attack_test_loader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                output, loss, gradient, label = self._get_data(self.target_model, inputs, targets)

                pickle.dump((output, loss, gradient, label, members), f)
            # pickle.dump((output, loss, gradient, label, members), open(self.ATTACK_PATH + "test.p", "wb"))

        print("Finished Saving Test Dataset")

    def train(self, epoch, result_path):
        """One training pass over the cached attack train set.

        `epoch` is a final-round flag: when truthy, F1/AUC are computed and
        predictions are dumped to `result_path`.
        Returns [f1, auc, accuracy] then, otherwise [accuracy].
        """
        self.attack_model.train()
        batch_idx = 1
        train_loss = 0
        correct = 0
        total = 0

        final_train_gndtrth = []
        final_train_predict = []
        final_train_probabe = []

        final_result = []

        with open(self.ATTACK_SETS + "train.p", "rb") as f:
            while(True):
                try:
                    output, loss, gradient, label, members = pickle.load(f)
                    output, loss, gradient, label, members = output.to(self.device), loss.to(self.device), gradient.to(self.device), label.to(self.device), members.to(self.device)

                    results = self.attack_model(output, loss, gradient, label)
                    # results = F.softmax(results, dim=1)
                    # Loss is computed on raw logits, as CrossEntropyLoss expects.
                    losses = self.attack_criterion(results, members)
                    # NOTE(review): no self.optimizer.zero_grad() here — gradients
                    # accumulate across batches; confirm whether this is intended.
                    losses.backward()
                    self.optimizer.step()

                    train_loss += losses.item()
                    _, predicted = results.max(1)
                    total += members.size(0)
                    correct += predicted.eq(members).sum().item()

                    if epoch:
                        final_train_gndtrth.append(members)
                        final_train_predict.append(predicted)
                        final_train_probabe.append(results[:, 1])

                    batch_idx += 1
                except EOFError:
                    break

        if epoch:
            final_train_gndtrth = torch.cat(final_train_gndtrth, dim=0).cpu().detach().numpy()
            final_train_predict = torch.cat(final_train_predict, dim=0).cpu().detach().numpy()
            final_train_probabe = torch.cat(final_train_probabe, dim=0).cpu().detach().numpy()

            train_f1_score = f1_score(final_train_gndtrth, final_train_predict)
            train_roc_auc_score = roc_auc_score(final_train_gndtrth, final_train_probabe)

            final_result.append(train_f1_score)
            final_result.append(train_roc_auc_score)

            with open(result_path, "wb") as f:
                pickle.dump((final_train_gndtrth, final_train_predict, final_train_probabe), f)

            print("Saved Attack Train Ground Truth and Predict Sets")
            print("Train F1: %f\nAUC: %f" % (train_f1_score, train_roc_auc_score))

        final_result.append(1.*correct/total)

        print( 'Train Acc: %.3f%% (%d/%d) | Loss: %.3f' % (100.*correct/total, correct, total, 1.*train_loss/batch_idx))

        return final_result

    def test(self, epoch, result_path):
        """Evaluate the attack model on the cached attack test set.

        Same final-round semantics as `train`: when `epoch` is truthy, F1/AUC
        are computed and predictions are dumped to `result_path`.
        """
        self.attack_model.eval()
        batch_idx = 1
        correct = 0
        total = 0

        final_test_gndtrth = []
        final_test_predict = []
        final_test_probabe = []

        final_result = []

        with torch.no_grad():
            with open(self.ATTACK_SETS + "test.p", "rb") as f:
                while(True):
                    try:
                        output, loss, gradient, label, members = pickle.load(f)
                        output, loss, gradient, label, members = output.to(self.device), loss.to(self.device), gradient.to(self.device), label.to(self.device), members.to(self.device)

                        results = self.attack_model(output, loss, gradient, label)

                        _, predicted = results.max(1)
                        total += members.size(0)
                        correct += predicted.eq(members).sum().item()

                        # Probabilities only for AUC reporting.
                        results = F.softmax(results, dim=1)

                        if epoch:
                            final_test_gndtrth.append(members)
                            final_test_predict.append(predicted)
                            final_test_probabe.append(results[:, 1])

                        batch_idx += 1
                    except EOFError:
                        break

        if epoch:
            final_test_gndtrth = torch.cat(final_test_gndtrth, dim=0).cpu().numpy()
            final_test_predict = torch.cat(final_test_predict, dim=0).cpu().numpy()
            final_test_probabe = torch.cat(final_test_probabe, dim=0).cpu().numpy()

            test_f1_score = f1_score(final_test_gndtrth, final_test_predict)
            test_roc_auc_score = roc_auc_score(final_test_gndtrth, final_test_probabe)

            final_result.append(test_f1_score)
            final_result.append(test_roc_auc_score)

            with open(result_path, "wb") as f:
                pickle.dump((final_test_gndtrth, final_test_predict, final_test_probabe), f)

            print("Saved Attack Test Ground Truth and Predict Sets")
            print("Test F1: %f\nAUC: %f" % (test_f1_score, test_roc_auc_score))

        final_result.append(1.*correct/total)

        print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/(1.0*total), correct, total))

        return final_result

    def delete_pickle(self):
        """Remove any cached attack train/test pickle files for this prefix."""
        train_file = glob.glob(self.ATTACK_SETS +"train.p")
        for trf in train_file:
            os.remove(trf)

        test_file = glob.glob(self.ATTACK_SETS +"test.p")
        for tef in test_file:
            os.remove(tef)

    def saveModel(self, path):
        """Persist the attack model's parameters (state_dict) to ``path``."""
        torch.save(self.attack_model.state_dict(), path)
def train_shadow_model(PATH, device, shadow_model, train_loader, test_loader, use_DP, noise, norm, loss, optimizer, delta):
    """Train a shadow model for 100 epochs and save it to PATH + "_shadow.pth".

    Returns (train_acc, test_acc, overfitting_gap) from the final epoch.
    """
    trainer = shadow(train_loader, test_loader, shadow_model, device, use_DP, noise, norm, loss, optimizer, delta)
    acc_train, acc_test = 0, 0

    for epoch in range(100):
        print("<======================= Epoch " + str(epoch + 1) + " =======================>")
        print("shadow training")
        acc_train = trainer.train()
        print("shadow testing")
        acc_test = trainer.test()

        # Gap between train and test accuracy, used as an overfitting proxy.
        overfitting = round(acc_train - acc_test, 6)
        print('The overfitting rate is %s' % overfitting)

    FILE_PATH = PATH + "_shadow.pth"
    trainer.saveModel(FILE_PATH)
    print("saved shadow model!!!")
    print("Finished training!!!")

    return acc_train, acc_test, overfitting
def train_shadow_distillation(MODEL_PATH, DL_PATH, device, target_model, student_model, train_loader, test_loader):
    """Distill the target model into a shadow student over 100 epochs.

    Saves the student to DL_PATH + "_shadow.pth" and returns
    (train_acc, test_acc, overfitting_gap) from the final epoch.
    """
    trainer = distillation_training(MODEL_PATH, train_loader, test_loader, student_model, target_model, device)

    for epoch in range(100):
        print("<======================= Epoch " + str(epoch + 1) + " =======================>")
        print("shadow distillation training")
        acc_distillation_train = trainer.train()
        print("shadow distillation testing")
        acc_distillation_test = trainer.test()

        # Train/test accuracy gap as an overfitting proxy.
        overfitting = round(acc_distillation_train - acc_distillation_test, 6)
        print('The overfitting rate is %s' % overfitting)

    result_path = DL_PATH + "_shadow.pth"
    trainer.saveModel(result_path)
    print("Saved shadow model!!!")
    print("Finished training!!!")

    return acc_distillation_train, acc_distillation_test, overfitting
def get_attack_dataset_without_shadow(train_set, test_set, batch_size):
    """Build attack train/test loaders using only the target's own data.

    A third of ``train_set`` (members, label 1) and a third of ``test_set``
    (non-members, label 0) go into each attack split; every sample tuple is
    extended with its membership label.

    Args:
        train_set: target training data (sequence of tuples).
        test_set: target test data (sequence of tuples).
        batch_size: batch size for both attack loaders.

    Returns:
        (attack_trainloader, attack_testloader)
    """
    mem_length = len(train_set)//3
    nonmem_length = len(test_set)//3
    mem_train, mem_test, _ = torch.utils.data.random_split(
        train_set, [mem_length, mem_length, len(train_set)-(mem_length*2)])
    nonmem_train, nonmem_test, _ = torch.utils.data.random_split(
        test_set, [nonmem_length, nonmem_length, len(test_set)-(nonmem_length*2)])

    # Append membership ground truth to every sample: 1 = member, 0 = non-member.
    mem_train = [sample + (1,) for sample in mem_train]
    mem_test = [sample + (1,) for sample in mem_test]
    nonmem_train = [sample + (0,) for sample in nonmem_train]
    nonmem_test = [sample + (0,) for sample in nonmem_test]

    attack_train = mem_train + nonmem_train
    attack_test = mem_test + nonmem_test

    attack_trainloader = torch.utils.data.DataLoader(
        attack_train, batch_size=batch_size, shuffle=True, num_workers=2)
    attack_testloader = torch.utils.data.DataLoader(
        attack_test, batch_size=batch_size, shuffle=True, num_workers=2)

    return attack_trainloader, attack_testloader
def get_attack_dataset_with_shadow(target_train, target_test, shadow_train, shadow_test, batch_size):
    """Build attack loaders in the shadow-model setting.

    Shadow train/test data become the attack train set (members/non-members);
    target train/test data become the attack test set. Each side is balanced
    by truncating the larger class to the size of the smaller one.

    Returns:
        (attack_trainloader, attack_testloader)
    """
    mem_train, nonmem_train = list(shadow_train), list(shadow_test)
    mem_test, nonmem_test = list(target_train), list(target_test)

    # Append membership ground truth: 1 = member, 0 = non-member.
    mem_train = [sample + (1,) for sample in mem_train]
    nonmem_train = [sample + (0,) for sample in nonmem_train]
    nonmem_test = [sample + (0,) for sample in nonmem_test]
    mem_test = [sample + (1,) for sample in mem_test]

    # Balance members vs. non-members on each side.
    train_length = min(len(mem_train), len(nonmem_train))
    test_length = min(len(mem_test), len(nonmem_test))

    mem_train, _ = torch.utils.data.random_split(mem_train, [train_length, len(mem_train) - train_length])
    non_mem_train, _ = torch.utils.data.random_split(nonmem_train, [train_length, len(nonmem_train) - train_length])
    mem_test, _ = torch.utils.data.random_split(mem_test, [test_length, len(mem_test) - test_length])
    non_mem_test, _ = torch.utils.data.random_split(nonmem_test, [test_length, len(nonmem_test) - test_length])

    # Dataset.__add__ yields a ConcatDataset of both subsets.
    attack_train = mem_train + non_mem_train
    attack_test = mem_test + non_mem_test

    attack_trainloader = torch.utils.data.DataLoader(
        attack_train, batch_size=batch_size, shuffle=True, num_workers=2)
    attack_testloader = torch.utils.data.DataLoader(
        attack_test, batch_size=batch_size, shuffle=True, num_workers=2)

    return attack_trainloader, attack_testloader
# black shadow
def attack_mode0(TARGET_PATH, SHADOW_PATH, ATTACK_PATH, device, attack_trainloader, attack_testloader, target_model, shadow_model, attack_model, get_attack_set, num_classes):
    """Mode 0: black-box membership inference with a separate shadow model.

    Trains the attack model for 50 epochs and returns the last epoch's
    (train_result, test_result).
    """
    MODELS_PATH = ATTACK_PATH + "_meminf_attack0.pth"
    RESULT_PATH = ATTACK_PATH + "_meminf_attack0.p"
    ATTACK_SETS = ATTACK_PATH + "_meminf_attack_mode0_"

    attack = attack_for_blackbox(SHADOW_PATH, TARGET_PATH, ATTACK_SETS, attack_trainloader, attack_testloader, target_model, shadow_model, attack_model, device)

    if get_attack_set:
        # Rebuild the cached attack features from scratch.
        attack.delete_pickle()
        attack.prepare_dataset()

    for epoch in range(50):
        # Final-round flag triggers F1/AUC computation and result dumping.
        last_round = 1 if epoch == 49 else 0
        print("Epoch %d :" % (epoch + 1))
        res_train = attack.train(last_round, RESULT_PATH)
        res_test = attack.test(last_round, RESULT_PATH)

    attack.saveModel(MODELS_PATH)
    print("Saved Attack Model")

    return res_train, res_test
# black partial
def attack_mode1(TARGET_PATH, ATTACK_PATH, device, attack_trainloader, attack_testloader, target_model, attack_model, get_attack_set, num_classes):
    """Mode 1: black-box membership inference in the partial setting — the
    target model doubles as its own shadow model.

    Trains the attack model for 50 epochs and returns the last epoch's
    (train_result, test_result).
    """
    MODELS_PATH = ATTACK_PATH + "_meminf_attack1.pth"
    RESULT_PATH = ATTACK_PATH + "_meminf_attack1.p"
    ATTACK_SETS = ATTACK_PATH + "_meminf_attack_mode1_"

    attack = attack_for_blackbox(TARGET_PATH, TARGET_PATH, ATTACK_SETS, attack_trainloader, attack_testloader, target_model, target_model, attack_model, device)

    if get_attack_set:
        attack.delete_pickle()
        attack.prepare_dataset()

    for epoch in range(50):
        last_round = 1 if epoch == 49 else 0
        print("Epoch %d :" % (epoch + 1))
        res_train = attack.train(last_round, RESULT_PATH)
        res_test = attack.test(last_round, RESULT_PATH)

    attack.saveModel(MODELS_PATH)
    print("Saved Attack Model")

    return res_train, res_test
# white partial
def attack_mode2(TARGET_PATH, ATTACK_PATH, device, attack_trainloader, attack_testloader, target_model, attack_model, get_attack_set, num_classes):
    """Mode 2: white-box membership inference in the partial setting — the
    target model doubles as its own shadow model.

    Trains the attack model for 50 epochs and returns the last epoch's
    (train_result, test_result).
    """
    MODELS_PATH = ATTACK_PATH + "_meminf_attack2.pth"
    RESULT_PATH = ATTACK_PATH + "_meminf_attack2.p"
    ATTACK_SETS = ATTACK_PATH + "_meminf_attack_mode2_"

    attack = attack_for_whitebox(TARGET_PATH, TARGET_PATH, ATTACK_SETS, attack_trainloader, attack_testloader, target_model, target_model, attack_model, device, num_classes)

    if get_attack_set:
        attack.delete_pickle()
        attack.prepare_dataset()

    for epoch in range(50):
        last_round = 1 if epoch == 49 else 0
        print("Epoch %d :" % (epoch + 1))
        res_train = attack.train(last_round, RESULT_PATH)
        res_test = attack.test(last_round, RESULT_PATH)

    attack.saveModel(MODELS_PATH)
    print("Saved Attack Model")

    return res_train, res_test
# white shadow
def attack_mode3(TARGET_PATH, SHADOW_PATH, ATTACK_PATH, device, attack_trainloader, attack_testloader, target_model, shadow_model, attack_model, get_attack_set, num_classes):
    """Mode 3: white-box membership inference with a separate shadow model.

    Trains the attack model for 50 epochs and returns the last epoch's
    (train_result, test_result).
    """
    MODELS_PATH = ATTACK_PATH + "_meminf_attack3.pth"
    RESULT_PATH = ATTACK_PATH + "_meminf_attack3.p"
    ATTACK_SETS = ATTACK_PATH + "_meminf_attack_mode3_"

    attack = attack_for_whitebox(TARGET_PATH, SHADOW_PATH, ATTACK_SETS, attack_trainloader, attack_testloader, target_model, shadow_model, attack_model, device, num_classes)

    if get_attack_set:
        attack.delete_pickle()
        attack.prepare_dataset()

    for epoch in range(50):
        last_round = 1 if epoch == 49 else 0
        print("Epoch %d :" % (epoch + 1))
        res_train = attack.train(last_round, RESULT_PATH)
        res_test = attack.test(last_round, RESULT_PATH)

    attack.saveModel(MODELS_PATH)
    print("Saved Attack Model")

    return res_train, res_test
def get_gradient_size(model):
    """Return the shapes of every 'weight' parameter, deepest layer first.

    Used to size the white-box attack model's gradient input branch.
    """
    return [param.shape
            for name, param in reversed(list(model.named_parameters()))
            if 'weight' in name]
| 31,612 | 37.042118 | 183 | py |
IVOS-ATNet | IVOS-ATNet-master/config.py | import os
class Config(object):
    """Central configuration for ATNet interactive VOS evaluation."""

    def __init__(self):
        # --- Dataset & hardware -------------------------------------------
        self.davis_dataset_dir = '/home/yuk/data_ssd/datasets/DAVIS'
        self.test_gpu_id = 2
        self.test_metric_list = ['J', 'J_AND_F']

        # --- Test parameters ----------------------------------------------
        self.test_host = 'localhost' # 'localhost' for subsets train and val.
        self.test_subset = 'val'
        self.test_userkey = None
        self.test_propagation_proportion = 0.99
        self.test_propth = 0.8
        self.test_min_nb_nodes = 2
        self.test_save_all_segs_option = True

        # --- Preprocessing ------------------------------------------------
        # ImageNet normalization statistics.
        self.mean = [0.485, 0.456, 0.406]
        self.var = [0.229, 0.224, 0.225]
        self.scribble_dilation_param = 5

        # --- Paths --------------------------------------------------------
        # Rel path
        # project_path = os.path.dirname(__file__)
        # self.font_dir = project_path + '/fonts/'
        self.palette_dir = self.davis_dataset_dir + '/Annotations/480p/bear/00000.png'
        self.test_result_df_dir = 'results/test_result_davisframework'
        self.test_result_rw_dir = 'results/test_result_realworld'
        self.test_load_state_dir = 'ATNet-checkpoint.pth' # CKpath
| 1,329 | 40.5625 | 95 | py |
IVOS-ATNet | IVOS-ATNet-master/eval_real-world.py | from davisinteractive.session import DavisInteractiveSession
from davisinteractive import utils as interactive_utils
from davisinteractive.dataset import Davis
from davisinteractive.metrics import batched_jaccard
from libs import custom_transforms as tr, davis2017_torchdataset
import os
import numpy as np
from PIL import Image
import csv
from datetime import datetime
import torch
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
from libs import utils, utils_torch
from libs.analyze_report import analyze_summary
from config import Config
from networks.atnet import ATnet
class Main_tester(object):
    """Drives DAVIS interactive VOS evaluation of ATNet.

    For each (sequence, scribble) sample, a DavisInteractiveSession supplies
    user scribbles for up to `max_nb_interactions` rounds. Each round runs
    ATNet's interaction network (A-Net) on the annotated frame and its
    transfer/propagation network (T-Net) over the surrounding frames, then
    submits the refined masks back to the session.
    """

    def __init__(self, config):
        self.config = config
        self.Davisclass = Davis(self.config.davis_dataset_dir)
        self.current_time = datetime.now().strftime('%Y%m%d-%H%M%S')
        # Shared palette for saving indexed-color (mode 'P') mask PNGs.
        self._palette = Image.open(self.config.palette_dir).getpalette()

        # Filled in per metric inside run_for_diverse_metrics.
        self.save_res_dir = str()
        self.save_log_dir = str()
        self.save_logger = None
        self.save_csvsummary_dir = str()

        # Frozen, pretrained ATNet in eval mode on the GPU.
        self.net = ATnet()
        self.net.cuda()
        self.net.eval()
        self.net.load_state_dict(torch.load(self.config.test_load_state_dir))

        # To implement ordered test: each sequence is evaluated under scribble
        # indices 1-3, with 8 interaction rounds and a 30s budget per round.
        self.scr_indices = [1, 2, 3]
        self.max_nb_interactions = 8
        self.max_time = self.max_nb_interactions * 30
        self.scr_samples = []
        for v in sorted(self.Davisclass.sets[self.config.test_subset]):
            for idx in self.scr_indices:
                self.scr_samples.append((v, idx))

        # Per-sequence state, (re)initialized whenever a new sample starts.
        self.img_size, self.num_frames, self.n_objects, self.final_masks, self.tmpdict_siact = None, None, None, None, None
        self.pad_info, self.hpad1, self.wpad1, self.hpad2, self.wpad2 = None, None, None, None, None

    def run_for_diverse_metrics(self, ):
        """Run the full benchmark once per metric in config.test_metric_list."""
        with torch.no_grad():
            for metric in self.config.test_metric_list:
                if metric == 'J':
                    dir_name = os.path.split(os.path.split(__file__)[0])[1] + '[J]_' + self.current_time
                elif metric == 'J_AND_F':
                    dir_name = os.path.split(os.path.split(__file__)[0])[1] + '[JF]_' + self.current_time
                else:
                    dir_name = None
                    print("Impossible metric is contained in config.test_metric_list!")
                    raise NotImplementedError()

                # NOTE(review): Config defines test_result_df_dir / test_result_rw_dir
                # but no test_result_dir — this looks like an AttributeError waiting
                # to happen; confirm against the Config class actually in use.
                self.save_res_dir = os.path.join(self.config.test_result_dir, dir_name)
                utils.mkdir(self.save_res_dir)
                self.save_csvsummary_dir = os.path.join(self.save_res_dir, 'summary_in_csv.csv')
                self.save_log_dir = os.path.join(self.save_res_dir, 'test_logs.txt')
                self.save_logger = utils.logger(self.save_log_dir)
                self.save_logger.printNlog(dir_name)

                # Snapshot the config used for this run next to its results.
                curr_path = os.path.dirname(os.path.abspath(__file__))
                os.system('cp {}/config.py {}/config.py'.format(curr_path, self.save_res_dir))

                self.run_IVOS(metric)

    def run_IVOS(self, metric):
        """Run the interactive session loop and log per-round IoU.

        Returns the per-round average IoU curve reported by the session.
        """
        seen_seq = {}
        numseq, tmpseq = 0, ''

        output_dict = dict()
        output_dict['average_objs_iou'] = dict()
        output_dict['average_iact_iou'] = np.zeros(self.max_nb_interactions)
        output_dict['annotated_frames'] = dict()

        # CSV header: one row per (sequence, object, scribble) with per-round IoU.
        with open(self.save_csvsummary_dir, mode='a') as csv_file:
            writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(['sequence', 'obj_idx', 'scr_idx'] + ['round-' + str(i + 1) for i in range(self.max_nb_interactions)])

        with DavisInteractiveSession(host=self.config.test_host,
                                     user_key=self.config.test_userkey,
                                     davis_root=self.config.davis_dataset_dir,
                                     subset=self.config.test_subset,
                                     report_save_dir=self.save_res_dir,
                                     max_nb_interactions=self.max_nb_interactions,
                                     max_time=self.max_time,
                                     metric_to_optimize=metric) as sess:
            sess.connector.service.robot.min_nb_nodes = self.config.test_min_nb_nodes
            sess.samples = self.scr_samples
            # sess.samples = [('dog', 3)]

            while sess.next():
                # Get the current iteration scribbles
                self.sequence, scribbles, first_scribble = sess.get_scribbles(only_last=False)

                if first_scribble:
                    # New (sequence, scribble) sample: reset per-sequence state.
                    anno_dict = {'frames': [], 'annotated_masks': [], 'masks_tobe_modified': []}
                    n_interaction = 1
                    info = Davis.dataset[self.sequence]
                    self.img_size = info['image_size'][::-1]
                    self.num_frames = info['num_frames']
                    self.n_objects = info['num_objects']
                    info = None
                    seen_seq[self.sequence] = 1 if self.sequence not in seen_seq.keys() else seen_seq[self.sequence] + 1
                    scr_id = seen_seq[self.sequence]

                    self.final_masks = np.zeros([self.num_frames, self.img_size[0], self.img_size[1]])
                    # Padding so H and W match the network's stride; /4 gives
                    # the downsampled feature-map size.
                    self.pad_info = utils.apply_pad(self.final_masks[0])[1]
                    self.hpad1, self.wpad1 = self.pad_info[0][0], self.pad_info[1][0]
                    self.hpad2, self.wpad2 = self.pad_info[0][1], self.pad_info[1][1]
                    self.h_ds, self.w_ds = int((self.img_size[0] + sum(self.pad_info[0])) / 4), int((self.img_size[1] + sum(self.pad_info[1])) / 4)

                    # Cached encoder features of every annotated frame, reused
                    # by the propagation network in later rounds.
                    self.anno_6chEnc_r5_list = []
                    self.anno_3chEnc_r5_list = []
                    self.prob_map_of_frames = torch.zeros((self.num_frames, self.n_objects, 4 * self.h_ds, 4 * self.w_ds)).cuda()
                    self.gt_masks = self.Davisclass.load_annotations(self.sequence)
                    IoU_over_eobj = []
                else:
                    n_interaction += 1

                self.save_logger.printNlog('\nRunning sequence {} in (scribble index: {}) (round: {})'
                                           .format(self.sequence, sess.samples[sess.sample_idx][1], n_interaction))
                annotated_now = interactive_utils.scribbles.annotated_frames(sess.sample_last_scribble)[0]
                anno_dict['frames'].append(annotated_now)  # Where we save annotated frames
                anno_dict['masks_tobe_modified'].append(self.final_masks[annotated_now])  # mask before modefied at the annotated frame

                # Get Predicted mask & Mask decision from pred_mask
                self.final_masks = self.run_VOS_singleiact(n_interaction, scribbles, anno_dict['frames'])  # self.final_mask changes

                if self.config.test_save_all_segs_option:
                    # Dump every frame's mask of this round as indexed PNGs.
                    utils.mkdir(
                        os.path.join(self.save_res_dir, 'result_video', '{}-scr{:02d}/round{:02d}'.format(self.sequence, scr_id, n_interaction)))
                    for fr in range(self.num_frames):
                        savefname = os.path.join(self.save_res_dir, 'result_video',
                                                 '{}-scr{:02d}/round{:02d}'.format(self.sequence, scr_id, n_interaction),
                                                 '{:05d}.png'.format(fr))
                        tmpPIL = Image.fromarray(self.final_masks[fr].astype(np.uint8), 'P')
                        tmpPIL.putpalette(self._palette)
                        tmpPIL.save(savefname)

                # Submit your prediction
                sess.submit_masks(self.final_masks)  # F, H, W

                # print sequence name
                if tmpseq != self.sequence:
                    tmpseq, numseq = self.sequence, numseq + 1
                    print(str(numseq) + ':' + str(self.sequence) + '-' + str(seen_seq[self.sequence]) + '\n')

                ## Visualizers and Saver
                # IoU estimation
                jaccard = batched_jaccard(self.gt_masks,
                                          self.final_masks,
                                          average_over_objects=False,
                                          nb_objects=self.n_objects
                                          )  # frames, objid
                IoU_over_eobj.append(jaccard)

                anno_dict['annotated_masks'].append(self.final_masks[annotated_now])  # mask after modefied at the annotated frame

                if self.max_nb_interactions == len(anno_dict['frames']):  # After Lastround -> total 90 iter
                    seq_scrid_name = self.sequence + str(scr_id)

                    # IoU manager
                    IoU_over_eobj = np.stack(IoU_over_eobj, axis=0)  # niact,frames,n_obj
                    IoUeveryround_perobj = np.mean(IoU_over_eobj, axis=1)  # niact,n_obj
                    output_dict['average_iact_iou'] += np.sum(IoU_over_eobj[list(range(n_interaction)), anno_dict['frames']], axis=-1)
                    output_dict['annotated_frames'][seq_scrid_name] = anno_dict['frames']

                    # write csv: one row per object with IoU after each round.
                    for obj_idx in range(self.n_objects):
                        with open(self.save_csvsummary_dir, mode='a') as csv_file:
                            writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                            writer.writerow([self.sequence, str(obj_idx + 1), str(scr_id)] + list(IoUeveryround_perobj[:, obj_idx]))

            summary = sess.get_global_summary(save_file=self.save_res_dir + '/summary_' + sess.report_name[7:] + '.json')
            analyze_summary(self.save_res_dir + '/summary_' + sess.report_name[7:] + '.json', metric=metric)
            # final_IOU = summary['curve'][metric][-1]
            average_IoU_per_round = summary['curve'][metric][1:-1]

        torch.cuda.empty_cache()
        # NOTE(review): `model` is never used in this method; this assignment
        # appears to be dead code.
        model = None

        return average_IoU_per_round

    def run_VOS_singleiact(self, n_interaction, scribbles_data, annotated_frames):
        """Process one interaction round: segment the annotated frame with
        A-Net, then propagate the result with T-Net over `prop_list`.

        Returns the updated per-frame masks (F, H, W) as float arrays.
        """
        annotated_frames_np = np.array(annotated_frames)
        num_workers = 4
        annotated_now = annotated_frames[-1]
        scribbles_list = scribbles_data['scribbles']
        seq_name = scribbles_data['sequence']

        output_masks = self.final_masks.copy().astype(np.float64)
        # Frames to (re)compute this round, centered on the annotated frame.
        prop_list = utils.get_prop_list(annotated_frames, annotated_now, self.num_frames, proportion=self.config.test_propagation_proportion)
        prop_fore = sorted(prop_list)[0]
        prop_rear = sorted(prop_list)[-1]

        # Interaction settings: per-object 3-channel input
        # (previous mask, positive scribbles, negative scribbles).
        pm_ps_ns_3ch_t = []  # n_obj,3,h,w
        if n_interaction == 1:
            # First round: no previous mask, so a uniform 0.5 prior and no
            # negative channel.
            for obj_id in range(1, self.n_objects + 1):
                pos_scrimg = utils.scribble_to_image(scribbles_list, annotated_now, obj_id,
                                                     dilation=self.config.scribble_dilation_param,
                                                     prev_mask=self.final_masks[annotated_now])
                pm_ps_ns_3ch_t.append(np.stack([np.ones_like(pos_scrimg) / 2, pos_scrimg, np.zeros_like(pos_scrimg)], axis=0))
            pm_ps_ns_3ch_t = np.stack(pm_ps_ns_3ch_t, axis=0)  # n_obj,3,h,w
            # Image.fromarray((scr_img[:, :, 1] * 255).astype(np.uint8)).save('/home/six/Desktop/CVPRW_figure/judo_obj1_scr.png')
        else:
            for obj_id in range(1, self.n_objects + 1):
                prev_round_input = (self.final_masks[annotated_now] == obj_id).astype(np.float32)  # H,W
                pos_scrimg, neg_scrimg = utils.scribble_to_image(scribbles_list, annotated_now, obj_id,
                                                                 dilation=self.config.scribble_dilation_param,
                                                                 prev_mask=self.final_masks[annotated_now], blur=True,
                                                                 singleimg=False, seperate_pos_neg=True)
                pm_ps_ns_3ch_t.append(np.stack([prev_round_input, pos_scrimg, neg_scrimg], axis=0))
            pm_ps_ns_3ch_t = np.stack(pm_ps_ns_3ch_t, axis=0)  # n_obj,3,h,w

        pm_ps_ns_3ch_t = torch.from_numpy(pm_ps_ns_3ch_t).cuda()

        # prop_list must start at the annotated frame (or contain it twice:
        # once for the backward sweep, once for the forward sweep).
        if (prop_list[0] != annotated_now) and (prop_list.count(annotated_now) != 2):
            print(str(prop_list))
            raise NotImplementedError
        print(str(prop_list))  # we made our proplist first backward, and then forward

        composed_transforms = transforms.Compose([tr.Normalize_ApplymeanvarImage(self.config.mean, self.config.var),
                                                  tr.ToTensor()])
        db_test = davis2017_torchdataset.DAVIS2017(split='val', transform=composed_transforms, root=self.config.davis_dataset_dir,
                                                   custom_frames=prop_list, seq_name=seq_name, rgb=True,
                                                   obj_id=None, no_gt=True, retname=True, prev_round_masks=self.final_masks, )
        testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=num_workers, pin_memory=True)

        flag = 0  # 1: propagating backward, 2: propagating forward

        print('[{:01d} round] processing...'.format(n_interaction))
        for ii, batched in enumerate(testloader):
            # batched : image, scr_img, 0~fr, meta
            inpdict = dict()
            operating_frame = int(batched['meta']['frame_id'][0])

            for inp in batched:
                if inp == 'meta': continue
                inpdict[inp] = Variable(batched[inp]).cuda()

            # One image replicated per object so all objects run in one batch.
            inpdict['image'] = inpdict['image'].expand(self.n_objects, -1, -1, -1)

            #################### Iaction ########################
            if operating_frame == annotated_now:  # Check the round is on interaction
                # The annotated frame appears twice (start of backward and of
                # forward sweep); A-Net runs only on its first occurrence.
                if flag == 0:
                    flag += 1
                    adjacent_to_anno = True
                elif flag == 1:
                    flag += 1
                    adjacent_to_anno = True
                    continue
                else:
                    raise NotImplementedError

                pm_ps_ns_3ch_t = torch.nn.ReflectionPad2d(self.pad_info[1] + self.pad_info[0])(pm_ps_ns_3ch_t)
                inputs = torch.cat([inpdict['image'], pm_ps_ns_3ch_t], dim=1)

                output_logit, anno_6chEnc_r5 = self.net.forward_ANet(inputs)  # [nobj, 1, P_H, P_W], # [n_obj,2048,h/16,w/16]
                output_prob_anno = torch.sigmoid(output_logit)
                prob_onehot_t = output_prob_anno[:, 0].detach()

                # Cache this frame's features for T-Net in later frames/rounds.
                anno_3chEnc_r5, _, _, r2_prev_fromanno = self.net.encoder_3ch.forward(inpdict['image'])
                self.anno_6chEnc_r5_list.append(anno_6chEnc_r5)
                self.anno_3chEnc_r5_list.append(anno_3chEnc_r5)

                if len(self.anno_6chEnc_r5_list) != len(annotated_frames):
                    raise NotImplementedError

            #################### Propagation ########################
            else:
                # Flag [1: propagating backward, 2: propagating forward]
                # The frame adjacent to the annotated one reuses A-Net's output
                # as the previous-frame mask; later frames chain T-Net outputs.
                if adjacent_to_anno:
                    r2_prev = r2_prev_fromanno
                    predmask_prev = output_prob_anno
                else:
                    predmask_prev = output_prob_prop
                adjacent_to_anno = False

                output_logit, r2_prev = self.net.forward_TNet(
                    self.anno_3chEnc_r5_list, inpdict['image'], self.anno_6chEnc_r5_list, r2_prev, predmask_prev)  # [nobj, 1, P_H, P_W]
                output_prob_prop = torch.sigmoid(output_logit)
                prob_onehot_t = output_prob_prop[:, 0].detach()

                # Blend with the previous round's probability map; the weight
                # decays linearly from 1 (at the annotated frame) toward 0.5
                # as we approach the closest other annotated frame.
                smallest_alpha = 0.5
                if flag == 1:
                    sorted_frames = annotated_frames_np[annotated_frames_np < annotated_now]
                    if len(sorted_frames) == 0:
                        alpha = 1
                    else:
                        closest_addianno_frame = np.max(sorted_frames)
                        alpha = smallest_alpha + (1 - smallest_alpha) * (
                                (operating_frame - closest_addianno_frame) / (annotated_now - closest_addianno_frame))
                else:
                    sorted_frames = annotated_frames_np[annotated_frames_np > annotated_now]
                    if len(sorted_frames) == 0:
                        alpha = 1
                    else:
                        closest_addianno_frame = np.min(sorted_frames)
                        alpha = smallest_alpha + (1 - smallest_alpha) * (
                                (closest_addianno_frame - operating_frame) / (closest_addianno_frame - annotated_now))

                prob_onehot_t = (alpha * prob_onehot_t) + ((1 - alpha) * self.prob_map_of_frames[operating_frame])

            # Final mask indexing: merge per-object probabilities into label
            # maps and crop the padding away.
            self.prob_map_of_frames[operating_frame] = prob_onehot_t

            # NOTE(review): np.float was removed in NumPy >= 1.24; this needs
            # `float` or `np.float64` under newer NumPy versions.
            output_masks[prop_fore:prop_rear + 1] = \
                utils_torch.combine_masks_with_batch(self.prob_map_of_frames[prop_fore:prop_rear + 1],
                                                     n_obj=self.n_objects, th=self.config.test_propth
                                                     )[:, 0, self.hpad1:-self.hpad2, self.wpad1:-self.wpad2].cpu().numpy().astype(np.float)  # f,h,w

        torch.cuda.empty_cache()

        return output_masks
if __name__ == '__main__':
    config = Config()
    # Pin CUDA to the configured GPU before any CUDA context is created.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.test_gpu_id)
    tester = Main_tester(config)
    tester.run_for_diverse_metrics()
| 17,873 | 50.2149 | 147 | py |
IVOS-ATNet | IVOS-ATNet-master/eval_davis-framework.py | from davisinteractive.session import DavisInteractiveSession
from davisinteractive import utils as interactive_utils
from davisinteractive.dataset import Davis
from davisinteractive.metrics import batched_jaccard
from libs import custom_transforms as tr, davis2017_torchdataset
import os
import numpy as np
from PIL import Image
import csv
from datetime import datetime
import torch
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
from libs import utils, utils_torch
from libs.analyze_report import analyze_summary
from config import Config
from networks.atnet import ATnet
class Main_tester(object):
    """Drives DAVIS-interactive evaluation sessions for the ATnet model.

    For every (sequence, scribble-index) sample it runs up to
    ``max_nb_interactions`` annotation rounds: each round receives new user
    scribbles from the DAVIS framework, segments the whole sequence with
    A-Net (annotated frame) and T-Net (propagation), submits the masks and
    logs per-object IoU to CSV/JSON reports.
    """

    def __init__(self, config):
        self.config = config
        self.Davisclass = Davis(self.config.davis_dataset_dir)
        self.current_time = datetime.now().strftime('%Y%m%d-%H%M%S')
        # Palette used to save indexed-colour PNG masks.
        self._palette = Image.open(self.config.palette_dir).getpalette()
        self.save_res_dir = str()
        self.save_log_dir = str()
        self.save_logger = None
        self.save_csvsummary_dir = str()
        # Load the trained interaction/transfer network for inference only.
        self.net = ATnet()
        self.net.cuda()
        self.net.eval()
        self.net.load_state_dict(torch.load(self.config.test_load_state_dir))
        # Fixed evaluation protocol: scribble sets 1-3 per sequence,
        # 8 rounds, 30 s budget per round.
        self.scr_indices = [1, 2, 3]
        self.max_nb_interactions = 8
        self.max_time = self.max_nb_interactions * 30
        self.scr_samples = []
        for v in sorted(self.Davisclass.sets[self.config.test_subset]):
            for idx in self.scr_indices:
                self.scr_samples.append((v, idx))
        # Per-sequence state, (re)initialised on the first scribble of each sample.
        self.img_size, self.num_frames, self.n_objects, self.final_masks, self.tmpdict_siact = None, None, None, None, None
        self.pad_info, self.hpad1, self.wpad1, self.hpad2, self.wpad2 = None, None, None, None, None

    def run_for_diverse_metrics(self, ):
        """Run one full interactive evaluation per metric in config.test_metric_list."""
        with torch.no_grad():
            for metric in self.config.test_metric_list:
                if metric == 'J':
                    dir_name = 'IVOS-ATNet_J_' + self.current_time
                elif metric == 'J_AND_F':
                    dir_name = 'IVOS-ATNet_JF_' + self.current_time
                else:
                    dir_name = None
                    print("Impossible metric is contained in config.test_metric_list!")
                    raise NotImplementedError()
                self.save_res_dir = os.path.join(self.config.test_result_df_dir, dir_name)
                utils.mkdir(self.save_res_dir)
                self.save_csvsummary_dir = os.path.join(self.save_res_dir, 'summary_in_csv.csv')
                self.save_log_dir = os.path.join(self.save_res_dir, 'test_logs.txt')
                self.save_logger = utils.logger(self.save_log_dir)
                self.save_logger.printNlog(dir_name)
                # Snapshot the config used for this run next to its results.
                curr_path = os.path.dirname(os.path.abspath(__file__))
                os.system('cp {}/config.py {}/config.py'.format(curr_path, self.save_res_dir))
                self.run_IVOS(metric)

    def run_IVOS(self, metric):
        """Evaluate every (sequence, scribble) sample under ``metric``.

        Returns the framework's average IoU per interaction round
        (first/last curve entries are trimmed as in the official summary).
        """
        seen_seq = {}
        numseq, tmpseq = 0, ''
        output_dict = dict()
        output_dict['average_objs_iou'] = dict()
        output_dict['average_iact_iou'] = np.zeros(self.max_nb_interactions)
        output_dict['annotated_frames'] = dict()
        # CSV header: one row per (sequence, object, scribble) with IoU per round.
        with open(self.save_csvsummary_dir, mode='a') as csv_file:
            writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(['sequence', 'obj_idx', 'scr_idx'] + ['round-' + str(i + 1) for i in range(self.max_nb_interactions)])
        with DavisInteractiveSession(host=self.config.test_host,
                                     user_key=self.config.test_userkey,
                                     davis_root=self.config.davis_dataset_dir,
                                     subset=self.config.test_subset,
                                     report_save_dir=self.save_res_dir,
                                     max_nb_interactions=self.max_nb_interactions,
                                     max_time=self.max_time,
                                     metric_to_optimize=metric) as sess:
            sess.connector.service.robot.min_nb_nodes = self.config.test_min_nb_nodes
            sess.samples = self.scr_samples
            # sess.samples = [('dog', 3)]  # single-sample debugging hook
            while sess.next():
                # Get the current iteration scribbles
                self.sequence, scribbles, first_scribble = sess.get_scribbles(only_last=False)
                if first_scribble:
                    # New (sequence, scribble) sample: reset per-sequence state.
                    anno_dict = {'frames': [], 'annotated_masks': [], 'masks_tobe_modified': []}
                    n_interaction = 1
                    info = Davis.dataset[self.sequence]
                    self.img_size = info['image_size'][::-1]
                    self.num_frames = info['num_frames']
                    self.n_objects = info['num_objects']
                    info = None
                    seen_seq[self.sequence] = 1 if self.sequence not in seen_seq.keys() else seen_seq[self.sequence] + 1
                    scr_id = seen_seq[self.sequence]
                    self.final_masks = np.zeros([self.num_frames, self.img_size[0], self.img_size[1]])
                    # Reflection-pad geometry so H/W are multiples of the network stride.
                    self.pad_info = utils.apply_pad(self.final_masks[0])[1]
                    self.hpad1, self.wpad1 = self.pad_info[0][0], self.pad_info[1][0]
                    self.hpad2, self.wpad2 = self.pad_info[0][1], self.pad_info[1][1]
                    self.h_ds, self.w_ds = int((self.img_size[0] + sum(self.pad_info[0])) / 4), int((self.img_size[1] + sum(self.pad_info[1])) / 4)
                    # Memory features gathered from every annotated frame so far.
                    self.anno_6chEnc_r5_list = []
                    self.anno_3chEnc_r5_list = []
                    self.prob_map_of_frames = torch.zeros((self.num_frames, self.n_objects, 4 * self.h_ds, 4 * self.w_ds)).cuda()
                    self.gt_masks = self.Davisclass.load_annotations(self.sequence)
                    IoU_over_eobj = []
                else:
                    n_interaction += 1
                self.save_logger.printNlog('\nRunning sequence {} in (scribble index: {}) (round: {})'
                                           .format(self.sequence, sess.samples[sess.sample_idx][1], n_interaction))
                annotated_now = interactive_utils.scribbles.annotated_frames(sess.sample_last_scribble)[0]
                anno_dict['frames'].append(annotated_now)  # Where we save annotated frames
                anno_dict['masks_tobe_modified'].append(self.final_masks[annotated_now])  # mask before being modified at the annotated frame
                # Get predicted masks for the whole sequence given the new scribbles.
                self.final_masks = self.run_VOS_singleiact(n_interaction, scribbles, anno_dict['frames'])  # self.final_masks changes
                if self.config.test_save_all_segs_option:
                    utils.mkdir(
                        os.path.join(self.save_res_dir, 'result_video', '{}-scr{:02d}/round{:02d}'.format(self.sequence, scr_id, n_interaction)))
                    for fr in range(self.num_frames):
                        savefname = os.path.join(self.save_res_dir, 'result_video',
                                                 '{}-scr{:02d}/round{:02d}'.format(self.sequence, scr_id, n_interaction),
                                                 '{:05d}.png'.format(fr))
                        tmpPIL = Image.fromarray(self.final_masks[fr].astype(np.uint8), 'P')
                        tmpPIL.putpalette(self._palette)
                        tmpPIL.save(savefname)
                # Submit your prediction
                sess.submit_masks(self.final_masks)  # F, H, W
                # Print the sequence name once per sequence.
                if tmpseq != self.sequence:
                    tmpseq, numseq = self.sequence, numseq + 1
                    print(str(numseq) + ':' + str(self.sequence) + '-' + str(seen_seq[self.sequence]) + '\n')
                ## Visualizers and Saver
                # IoU estimation
                jaccard = batched_jaccard(self.gt_masks,
                                          self.final_masks,
                                          average_over_objects=False,
                                          nb_objects=self.n_objects
                                          )  # frames, objid
                IoU_over_eobj.append(jaccard)
                anno_dict['annotated_masks'].append(self.final_masks[annotated_now])  # mask after being modified at the annotated frame
                if self.max_nb_interactions == len(anno_dict['frames']):  # After the last round
                    seq_scrid_name = self.sequence + str(scr_id)
                    # IoU manager
                    IoU_over_eobj = np.stack(IoU_over_eobj, axis=0)  # niact,frames,n_obj
                    IoUeveryround_perobj = np.mean(IoU_over_eobj, axis=1)  # niact,n_obj
                    output_dict['average_iact_iou'] += np.sum(IoU_over_eobj[list(range(n_interaction)), anno_dict['frames']], axis=-1)
                    output_dict['annotated_frames'][seq_scrid_name] = anno_dict['frames']
                    # write csv
                    for obj_idx in range(self.n_objects):
                        with open(self.save_csvsummary_dir, mode='a') as csv_file:
                            writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                            writer.writerow([self.sequence, str(obj_idx + 1), str(scr_id)] + list(IoUeveryround_perobj[:, obj_idx]))
            summary = sess.get_global_summary(save_file=self.save_res_dir + '/summary_' + sess.report_name[7:] + '.json')
            analyze_summary(self.save_res_dir + '/summary_' + sess.report_name[7:] + '.json', metric=metric)
            # final_IOU = summary['curve'][metric][-1]
            average_IoU_per_round = summary['curve'][metric][1:-1]
        torch.cuda.empty_cache()
        return average_IoU_per_round

    def run_VOS_singleiact(self, n_interaction, scribbles_data, annotated_frames):
        """Run one interaction round.

        A-Net segments the newly annotated frame from its scribbles, then
        T-Net propagates backward and forward over ``prop_list``, blending
        new probabilities with the previous round's map.

        Returns the updated label maps for all frames (F, H, W).
        """
        annotated_frames_np = np.array(annotated_frames)
        num_workers = 4
        annotated_now = annotated_frames[-1]
        scribbles_list = scribbles_data['scribbles']
        seq_name = scribbles_data['sequence']
        output_masks = self.final_masks.copy().astype(np.float64)
        # Frames to (re-)segment this round: the annotated frame first, then
        # backward and forward spans bounded by other annotated frames.
        prop_list = utils.get_prop_list(annotated_frames, annotated_now, self.num_frames, proportion=self.config.test_propagation_proportion)
        prop_fore = sorted(prop_list)[0]
        prop_rear = sorted(prop_list)[-1]
        # Interaction input: per object, 3 channels
        # (previous mask, positive scribbles, negative scribbles).
        pm_ps_ns_3ch_t = []  # n_obj,3,h,w
        if n_interaction == 1:
            for obj_id in range(1, self.n_objects + 1):
                pos_scrimg = utils.scribble_to_image(scribbles_list, annotated_now, obj_id,
                                                     dilation=self.config.scribble_dilation_param,
                                                     prev_mask=self.final_masks[annotated_now])
                # No previous mask yet: uniform 0.5 prior and no negatives.
                pm_ps_ns_3ch_t.append(np.stack([np.ones_like(pos_scrimg) / 2, pos_scrimg, np.zeros_like(pos_scrimg)], axis=0))
            pm_ps_ns_3ch_t = np.stack(pm_ps_ns_3ch_t, axis=0)  # n_obj,3,h,w
        else:
            for obj_id in range(1, self.n_objects + 1):
                prev_round_input = (self.final_masks[annotated_now] == obj_id).astype(np.float32)  # H,W
                pos_scrimg, neg_scrimg = utils.scribble_to_image(scribbles_list, annotated_now, obj_id,
                                                                 dilation=self.config.scribble_dilation_param,
                                                                 prev_mask=self.final_masks[annotated_now], blur=True,
                                                                 singleimg=False, seperate_pos_neg=True)
                pm_ps_ns_3ch_t.append(np.stack([prev_round_input, pos_scrimg, neg_scrimg], axis=0))
            pm_ps_ns_3ch_t = np.stack(pm_ps_ns_3ch_t, axis=0)  # n_obj,3,h,w
        pm_ps_ns_3ch_t = torch.from_numpy(pm_ps_ns_3ch_t).cuda()
        # prop_list must start at the annotated frame (or visit it exactly
        # twice when propagating in both directions).
        if (prop_list[0] != annotated_now) and (prop_list.count(annotated_now) != 2):
            print(str(prop_list))
            raise NotImplementedError
        print(str(prop_list))  # we made our proplist first backward, and then forward
        composed_transforms = transforms.Compose([tr.Normalize_ApplymeanvarImage(self.config.mean, self.config.var),
                                                  tr.ToTensor()])
        db_test = davis2017_torchdataset.DAVIS2017(split='val', transform=composed_transforms, root=self.config.davis_dataset_dir,
                                                   custom_frames=prop_list, seq_name=seq_name, rgb=True,
                                                   obj_id=None, no_gt=True, retname=True, prev_round_masks=self.final_masks, )
        testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=num_workers, pin_memory=True)
        flag = 0  # 1: propagating backward, 2: propagating forward
        print('[{:01d} round] processing...'.format(n_interaction))
        for ii, batched in enumerate(testloader):
            # batched : image, scr_img, 0~fr, meta
            inpdict = dict()
            operating_frame = int(batched['meta']['frame_id'][0])
            for inp in batched:
                if inp == 'meta': continue
                inpdict[inp] = Variable(batched[inp]).cuda()
            # Replicate the frame once per object so all objects run as a batch.
            inpdict['image'] = inpdict['image'].expand(self.n_objects, -1, -1, -1)
            #################### Interaction ########################
            if operating_frame == annotated_now:  # Check the round is on interaction
                if flag == 0:
                    flag += 1
                    adjacent_to_anno = True
                elif flag == 1:
                    # Second visit to the annotated frame: switch to forward
                    # propagation and skip re-running A-Net.
                    flag += 1
                    adjacent_to_anno = True
                    continue
                else:
                    raise NotImplementedError
                pm_ps_ns_3ch_t = torch.nn.ReflectionPad2d(self.pad_info[1] + self.pad_info[0])(pm_ps_ns_3ch_t)
                inputs = torch.cat([inpdict['image'], pm_ps_ns_3ch_t], dim=1)
                output_logit, anno_6chEnc_r5 = self.net.forward_ANet(inputs)  # [nobj, 1, P_H, P_W], [n_obj,2048,h/16,w/16]
                output_prob_anno = torch.sigmoid(output_logit)
                prob_onehot_t = output_prob_anno[:, 0].detach()
                anno_3chEnc_r5, _, _, r2_prev_fromanno = self.net.encoder_3ch.forward(inpdict['image'])
                self.anno_6chEnc_r5_list.append(anno_6chEnc_r5)
                self.anno_3chEnc_r5_list.append(anno_3chEnc_r5)
                if len(self.anno_6chEnc_r5_list) != len(annotated_frames):
                    raise NotImplementedError
            #################### Propagation ########################
            else:
                # Flag [1: propagating backward, 2: propagating forward]
                if adjacent_to_anno:
                    r2_prev = r2_prev_fromanno
                    predmask_prev = output_prob_anno
                else:
                    predmask_prev = output_prob_prop
                adjacent_to_anno = False
                output_logit, r2_prev = self.net.forward_TNet(
                    self.anno_3chEnc_r5_list, inpdict['image'], self.anno_6chEnc_r5_list, r2_prev, predmask_prev)  # [nobj, 1, P_H, P_W]
                output_prob_prop = torch.sigmoid(output_logit)
                prob_onehot_t = output_prob_prop[:, 0].detach()
                # Blend with the previous round: alpha decays linearly from 1
                # at the current annotation to 0.5 at the nearest other
                # annotated frame in the propagation direction.
                smallest_alpha = 0.5
                if flag == 1:
                    sorted_frames = annotated_frames_np[annotated_frames_np < annotated_now]
                    if len(sorted_frames) == 0:
                        alpha = 1
                    else:
                        closest_addianno_frame = np.max(sorted_frames)
                        alpha = smallest_alpha + (1 - smallest_alpha) * (
                                (operating_frame - closest_addianno_frame) / (annotated_now - closest_addianno_frame))
                else:
                    sorted_frames = annotated_frames_np[annotated_frames_np > annotated_now]
                    if len(sorted_frames) == 0:
                        alpha = 1
                    else:
                        closest_addianno_frame = np.min(sorted_frames)
                        alpha = smallest_alpha + (1 - smallest_alpha) * (
                                (closest_addianno_frame - operating_frame) / (closest_addianno_frame - annotated_now))
                prob_onehot_t = (alpha * prob_onehot_t) + ((1 - alpha) * self.prob_map_of_frames[operating_frame])
            # Final mask indexing
            self.prob_map_of_frames[operating_frame] = prob_onehot_t
        # Argmax over objects (with threshold) and crop the reflection padding.
        # BUGFIX: np.float was removed in NumPy 1.24 -> use np.float64.
        output_masks[prop_fore:prop_rear + 1] = \
            utils_torch.combine_masks_with_batch(self.prob_map_of_frames[prop_fore:prop_rear + 1],
                                                 n_obj=self.n_objects, th=self.config.test_propth
                                                 )[:, 0, self.hpad1:-self.hpad2, self.wpad1:-self.wpad2].cpu().numpy().astype(np.float64)  # f,h,w
        torch.cuda.empty_cache()
        return output_masks
if __name__ == '__main__':
    config = Config()
    # Pin the GPU selection before any CUDA context is created.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.test_gpu_id)
    tester = Main_tester(config)
    tester.run_for_diverse_metrics()
| 17,800 | 50.005731 | 147 | py |
IVOS-ATNet | IVOS-ATNet-master/networks/ltm_transfer.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class LTM_transfer(nn.Module):
    """Local transfer module.

    Warps a previous-frame mask into the current frame by sampling it at
    (2*md+1)^2 local displacements and taking the affinity-weighted sum,
    where the affinities come from a local correlation volume.
    """

    def __init__(self, md=4, stride=1):
        super(LTM_transfer, self).__init__()
        self.md = md  # max displacement in correlation cells (default 4 pixels-of-stride)
        self.range = (md*2 + 1) ** 2  # number of displacement hypotheses (default 81)
        self.grid = None  # cached uniform sampling grid; rebuilt when input size changes
        self.Channelwise_sum = None  # NOTE(review): never used in this class
        # Per-hypothesis (dx, dy) pixel offsets, shape (range, 2).
        # NOTE(review): .cuda() at construction time means this module requires a GPU.
        d_u = torch.linspace(-self.md * stride, self.md * stride, 2 * self.md + 1).view(1, -1).repeat((2 * self.md + 1, 1)).view(self.range, 1)  # (25,1)
        d_v = torch.linspace(-self.md * stride, self.md * stride, 2 * self.md + 1).view(-1, 1).repeat((1, 2 * self.md + 1)).view(self.range, 1)  # (25,1)
        self.d = torch.cat((d_u, d_v), dim=1).cuda()  # (25,2)

    def L2normalize(self, x, d=1):
        # Unit-normalise x along dim d; eps guards against divide-by-zero.
        # NOTE(review): not called inside this class (see commented-out usage
        # in forward); presumably kept for external callers.
        eps = 1e-6
        norm = x ** 2
        norm = norm.sum(dim=d, keepdim=True) + eps
        norm = norm ** (0.5)
        return (x/norm)

    def UniformGrid(self, Input):
        '''
        Make a uniform pixel-coordinate grid matching Input's spatial size.
        :param Input: tensor(N,C,H,W)
        :return grid: (1,2,H,W); channel 0 = x (column index), channel 1 = y (row index)
        '''
        _, _, H, W = Input.size()
        # mesh grid
        xx = torch.arange(0, W).view(1, 1, 1, W).expand(1, 1, H, W)
        yy = torch.arange(0, H).view(1, 1, H, 1).expand(1, 1, H, W)
        grid = torch.cat((xx, yy), 1).float()
        if Input.is_cuda:
            grid = grid.cuda()
        return grid

    def warp(self, x, BM_d):
        # Sample x at grid + displacement; out-of-range samples are zeroed out
        # via the validity mask below.
        vgrid = self.grid + BM_d  # [N2HW] # [(2d+1)^2, 2, H, W]
        # scale grid coordinates to [-1,1] as required by grid_sample
        vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :] / max(x.size(3) - 1, 1) - 1.0
        vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :] / max(x.size(2) - 1, 1) - 1.0
        vgrid = vgrid.permute(0, 2, 3, 1)
        # NOTE(review): grid_sample without an explicit align_corners warns on
        # modern PyTorch (default flipped to False in 1.3) — confirm intent.
        output = nn.functional.grid_sample(x, vgrid, mode='bilinear', padding_mode = 'border')  # ~800MB memory (d=2,C=64,H=256,W=256)
        # Validity mask: 1 only where the sample fell fully inside the image.
        mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
        mask = nn.functional.grid_sample(mask, vgrid)  # ~300MB memory (d=2,C=64,H=256,W=256)
        mask = mask.masked_fill_(mask<0.999,0)
        mask = mask.masked_fill_(mask>0,1)
        return output * mask

    def forward(self,sim_feature, f_map, apply_softmax_on_simfeature = True):
        '''
        Transfer f_map through the local affinity volume.
        :param sim_feature: local correlation feature on the operating frame's
            grid (N,D2,H,W), one channel per displacement hypothesis
        :param f_map: previous frame mask (N,1,H,W)
        :return: transferred mask (N,1,H,W)
        '''
        B_size,C_size,H_size,W_size = f_map.size()
        if self.grid is None:
            # Initialize first uniform grid
            self.grid = self.UniformGrid(f_map)
        if H_size != self.grid.size(2) or W_size != self.grid.size(3):
            # Update uniform grid to fit on input tensor shape
            self.grid = self.UniformGrid(f_map)
        # Displacement volume: each hypothesis' (dx,dy) broadcast over H,W.
        D_vol = self.d.view(self.range, 2, 1, 1).expand(-1, -1, H_size, W_size)  # [(2d+1)^2, 2, H, W]
        if apply_softmax_on_simfeature:
            sim_feature = F.softmax(sim_feature, dim=1)  # B,D^2,H,W (normalise over displacements)
        # Warp the mask once per displacement, then affinity-weighted sum.
        f_map = self.warp(f_map.transpose(0, 1).expand(self.range,-1,-1,-1), D_vol).transpose(0, 1)  # B,D^2,H,W
        f_map = torch.sum(torch.mul(sim_feature, f_map),dim=1, keepdim=True)  # B,1,H,W
        return f_map  # B,1,H,W
| 3,767 | 38.25 | 153 | py |
IVOS-ATNet | IVOS-ATNet-master/networks/atnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.deeplab.aspp import ASPP
from networks.deeplab.backbone.resnet import SEResNet50
from networks.correlation_package.correlation import Correlation
from networks.ltm_transfer import LTM_transfer
class ATnet(nn.Module):
    """Annotation network (A-Net) + Transfer network (T-Net) for interactive VOS.

    forward_ANet segments the user-annotated frame from RGB + 3 interaction
    channels; forward_TNet propagates masks to other frames using a global
    transfer over all annotated-frame memories (r5 scale) and a local
    transfer from the previous frame (r2 scale).
    """

    def __init__(self, pretrained=1, resfix=False, corr_displacement=4, corr_stride=2):
        super(ATnet, self).__init__()
        print("Constructing ATnet architecture..")
        self.encoder_6ch = Encoder_6ch(resfix)  # A-Net encoder (RGB + 3 interaction channels)
        self.encoder_3ch = Encoder_3ch(resfix)  # T-Net encoder (RGB only)
        self.indicator_encoder = ConverterEncoder()  # compresses A-Net decoder feature m2 to r5 scale
        self.decoder_iact = Decoder()
        self.decoder_prop = Decoder_prop()
        # Local correlation layer for frame-to-frame transfer at r2 scale.
        self.ltm_local_affinity = Correlation(pad_size=corr_displacement * corr_stride, kernel_size=1,
                                              max_displacement=corr_displacement * corr_stride,
                                              stride1=1, stride2=corr_stride, corr_multiply=1)
        self.ltm_transfer = LTM_transfer(md=corr_displacement, stride=corr_stride)
        self.prev_conv1x1 = nn.Conv2d(256, 256, kernel_size=1, padding=0)  # 1/4, 256 (shared projection for r2 features)
        self.conv1x1 = nn.Conv2d(2048*2, 2048, kernel_size=1, padding=0)  # 1/16, 2048
        self.refer_weight = None  # NOTE(review): never used
        self._initialize_weights(pretrained)

    def forward_ANet(self, x):  # Bx6xHxW to Bx1xHxW
        """Segment the annotated frame.

        :param x: (n_obj, 6, H, W) RGB + (prev mask, pos scribble, neg scribble)
        :return: mask logits (n_obj, 1, H, W) and the indicator memory
            feature r5 (n_obj, 2048, H/16, W/16)
        """
        r5, r4, r3, r2 = self.encoder_6ch(x)
        estimated_mask, m2 = self.decoder_iact(r5, r3, r2, only_return_feature=False)
        r5_indicator = self.indicator_encoder(r5, m2)
        return estimated_mask, r5_indicator

    def forward_TNet(self, anno_propEnc_r5_list, targframe_3ch, anno_iactEnc_r5_list, r2_prev, predmask_prev, debug_f_mask = False):  # 1/16, 2048
        """Propagate masks to a target frame.

        :param anno_propEnc_r5_list: 3ch-encoder r5 of each annotated frame
        :param targframe_3ch: target frame RGB, (n_obj, 3, H, W)
        :param anno_iactEnc_r5_list: indicator features of annotated frames
        :param r2_prev: r2 feature of the previously processed frame
        :param predmask_prev: previous frame's predicted probabilities
        :return: mask logits and the target r2 (plus f_mask_r2 when debugging)
        """
        f_targ, _, r3_targ, r2_targ = self.encoder_3ch(targframe_3ch)
        f_mask_r5 = self.correlation_global_transfer(anno_propEnc_r5_list, f_targ, anno_iactEnc_r5_list)  # 1/16, 2048
        r2_targ_c = self.prev_conv1x1(r2_targ)
        r2_prev = self.prev_conv1x1(r2_prev)
        f_mask_r2 = self.correlation_local_transfer(r2_prev, r2_targ_c, predmask_prev)  # 1/4, 1 [B,1,H/4,W/4]
        r5_concat = torch.cat([f_targ, f_mask_r5], dim=1)  # 1/16, 2048*2
        r5_concat = self.conv1x1(r5_concat)
        estimated_mask, m2 = self.decoder_prop(r5_concat, r3_targ, r2_targ, f_mask_r2)
        if not debug_f_mask:
            return estimated_mask, r2_targ
        else:
            return estimated_mask, r2_targ, f_mask_r2

    def correlation_global_transfer(self, anno_feature_list, targ_feature, anno_indicator_feature_list ):
        '''
        Non-local transfer: attend between annotated-frame and target
        positions and carry the indicator features across.
        :param anno_feature_list: [B,C,H,W] x list (N values in list)
        :param targ_feature: [B,C,H,W]
        :param anno_indicator_feature_list: [B,C,H,W] x list (N values in list)
        :return targ_mask_feature: [B,C,H,W]
        '''
        b, c, h, w = anno_indicator_feature_list[0].size()  # b means n_objs
        targ_feature = targ_feature.view(b, c, h * w)  # [B, C, HxW]
        n_features = len(anno_feature_list)
        anno_feature = []
        for f_idx in range(n_features):
            anno_feature.append(anno_feature_list[f_idx].view(b, c, h * w).transpose(1, 2))  # [B, HxW', C]
        anno_feature = torch.cat(anno_feature, dim=1)  # [B, NxHxW', C]
        sim_feature = torch.bmm(anno_feature, targ_feature)  # [B, NxHxW', HxW]
        # NOTE(review): softmax over dim=2 normalises over *target* positions
        # (then averages over the N memory frames) — confirm this matches the
        # intended attention direction before changing anything.
        sim_feature = F.softmax(sim_feature, dim=2) / n_features  # [B, NxHxW', HxW]
        anno_indicator_feature = []
        for f_idx in range(n_features):
            anno_indicator_feature.append(anno_indicator_feature_list[f_idx].view(b, c, h * w))  # [B, C, HxW']
        anno_indicator_feature = torch.cat(anno_indicator_feature, dim=-1)  # [B, C, NxHxW']
        targ_mask_feature = torch.bmm(anno_indicator_feature, sim_feature)  # [B, C, HxW]
        targ_mask_feature = targ_mask_feature.view(b, c, h, w)
        return targ_mask_feature

    def correlation_local_transfer(self, r2_prev, r2_targ, predmask_prev):
        '''
        Local transfer at r2 scale: warp the previous frame's predicted mask
        through a local correlation volume.
        :param r2_prev: [B,C,H,W]
        :param r2_targ: [B,C,H,W]
        :param predmask_prev: [B,1,4*H,4*W]
        :return targ_mask_feature_r2: [B,1,H,W]
        '''
        predmask_prev = F.interpolate(predmask_prev, scale_factor=0.25, mode='bilinear',align_corners=True)  # B,1,H,W
        sim_feature = self.ltm_local_affinity.forward(r2_targ,r2_prev,)  # B,D^2,H,W
        # NOTE(review): softmax over dim=2 (the height axis) looks inconsistent
        # with LTM_transfer.forward, which normalises the displacement axis
        # (dim=1). Trained checkpoints depend on the current behaviour, so
        # verify against the released weights before "fixing" this.
        sim_feature = F.softmax(sim_feature, dim=2)  # B,D^2,H,W
        predmask_targ = self.ltm_transfer.forward(sim_feature, predmask_prev, apply_softmax_on_simfeature = False)  # B,1,H,W
        return predmask_targ

    def _initialize_weights(self, pretrained):
        # When pretrained, keep loaded weights untouched; otherwise random init.
        for m in self.modules():
            if pretrained:
                break
            else:
                if isinstance(m, nn.Conv2d):
                    m.weight.data.normal_(0, 0.001)
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    m.weight.data.normal_(0, 0.01)
                    m.bias.data.zero_()
class Encoder_3ch(nn.Module):
    """T-Net encoder: SE-ResNet50 backbone over a plain RGB input.

    forward returns features at strides 16/16/8/4 (r5, r4, r3, r2);
    forward_r2 runs only the stem and first residual stage.
    """

    def __init__(self, resfix):
        super(Encoder_3ch, self).__init__()
        self.conv0_3ch = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True)
        resnet = SEResNet50(output_stride=16, BatchNorm=nn.BatchNorm2d, pretrained=True)
        self.bn1 = resnet.bn1
        self.relu = resnet.relu  # 1/2, 64
        self.maxpool = resnet.maxpool
        self.res2 = resnet.layer1  # 1/4, 256
        self.res3 = resnet.layer2  # 1/8, 512
        self.res4 = resnet.layer3  # 1/16, 1024
        self.res5 = resnet.layer4  # 1/16, 2048
        if resfix:
            # Freeze all batch-norm affine parameters.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    for p in m.parameters():
                        p.requires_grad = False

    def forward(self, x):
        # Reuse forward_r2 for the shared stem instead of duplicating it.
        r2 = self.forward_r2(x)  # 1/4, 256
        r3 = self.res3(r2)       # 1/8, 512
        r4 = self.res4(r3)       # 1/16, 1024
        r5 = self.res5(r4)       # 1/16, 2048
        return r5, r4, r3, r2

    def forward_r2(self, x):
        """Stem + first residual stage only; returns r2 (1/4, 256)."""
        x = self.conv0_3ch(x)  # 1/2, 64
        x = self.bn1(x)
        c1 = self.relu(x)      # 1/2, 64
        x = self.maxpool(c1)   # 1/4, 64
        return self.res2(x)    # 1/4, 256
class Encoder_6ch(nn.Module):
    """A-Net encoder: SE-ResNet50 backbone whose stem accepts a 6-channel
    input (RGB plus three interaction channels)."""

    def __init__(self, resfix):
        super(Encoder_6ch, self).__init__()
        # Replace the standard 3-channel stem conv with a 6-channel one.
        self.conv0_6ch = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=True)
        backbone = SEResNet50(output_stride=16, BatchNorm=nn.BatchNorm2d, pretrained=True)
        self.bn1 = backbone.bn1
        self.relu = backbone.relu      # 1/2, 64
        self.maxpool = backbone.maxpool
        self.res2 = backbone.layer1    # 1/4, 256
        self.res3 = backbone.layer2    # 1/8, 512
        self.res4 = backbone.layer3    # 1/16, 1024
        self.res5 = backbone.layer4    # 1/16, 2048
        if resfix:
            # Freeze every batch-norm layer's affine parameters.
            for module in self.modules():
                if isinstance(module, nn.BatchNorm2d):
                    for param in module.parameters():
                        param.requires_grad = False

    def forward(self, x):
        """x: (B, 6, H, W) -> (r5, r4, r3, r2) at strides 16/16/8/4."""
        stem = self.relu(self.bn1(self.conv0_6ch(x)))  # 1/2, 64
        r2 = self.res2(self.maxpool(stem))             # 1/4, 256
        r3 = self.res3(r2)                             # 1/8, 512
        r4 = self.res4(r3)                             # 1/16, 1024
        r5 = self.res5(r4)                             # 1/16, 2048
        return r5, r4, r3, r2
class Decoder(nn.Module):
    """A-Net decoder.

    Refines r5 through ASPP context and two skip-connected Refine stages,
    producing a 1-channel mask logit at full resolution plus the 1/4-scale
    feature m2 that ConverterEncoder consumes.
    NOTE(review): near-duplicate of Decoder_prop, which additionally injects
    the locally-transferred mask f_mask_r2.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        mdim = 256
        self.aspp_decoder = ASPP(backbone='res', output_stride=16, BatchNorm=nn.BatchNorm2d, pretrained=1)
        self.convG0 = nn.Conv2d(2048, mdim, kernel_size=3, padding=1)
        self.convG1 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
        self.convG2 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
        self.RF3 = Refine(512, mdim)  # 1/16 -> 1/8
        self.RF2 = Refine(256, mdim)  # 1/8 -> 1/4
        self.lastconv = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                      nn.BatchNorm2d(256),
                                      nn.ReLU(),
                                      nn.Dropout(0.5),
                                      nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                      nn.BatchNorm2d(256),
                                      nn.ReLU(),
                                      nn.Dropout(0.1),
                                      nn.Conv2d(256, 1, kernel_size=1, stride=1))

    def forward(self, r5, r3_targ, r2_targ, only_return_feature = False):
        """r5 (1/16,2048), r3 (1/8,512), r2 (1/4,256) -> logits (H,W) and m2 (1/4, 2*mdim)."""
        aspp_out = self.aspp_decoder(r5)  # 1/16 mdim
        aspp_out = F.interpolate(aspp_out, scale_factor=4, mode='bilinear',align_corners=True)  # 1/4 mdim
        m4 = self.convG0(F.relu(r5))  # out: 1/16, mdim
        m4 = self.convG1(F.relu(m4))  # out: 1/16, mdim
        m4 = self.convG2(F.relu(m4))  # out: 1/16, mdim
        m3 = self.RF3(r3_targ, m4)  # out: 1/8, mdim
        m2 = self.RF2(r2_targ, m3)  # out: 1/4, mdim
        m2 = torch.cat((m2, aspp_out), dim=1)  # out: 1/4, mdim*2
        if only_return_feature:
            return m2
        x = self.lastconv(m2)
        x = F.interpolate(x, scale_factor=4, mode='bilinear', align_corners=True)
        return x, m2
class Decoder_prop(nn.Module):
    """T-Net decoder.

    Same layout as the A-Net Decoder, but injects the locally-transferred
    previous-frame mask f_mask_r2 (scaled by 0.5) into the m3 and m2 stages.
    """

    def __init__(self):
        super(Decoder_prop, self).__init__()
        mdim = 256
        self.aspp_decoder = ASPP(backbone='res', output_stride=16, BatchNorm=nn.BatchNorm2d, pretrained=1)
        self.convG0 = nn.Conv2d(2048, mdim, kernel_size=3, padding=1)
        self.convG1 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
        self.convG2 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
        self.RF3 = Refine(512, mdim)  # 1/16 -> 1/8
        self.RF2 = Refine(256, mdim)  # 1/8 -> 1/4
        self.lastconv = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                      nn.BatchNorm2d(256),
                                      nn.ReLU(),
                                      nn.Dropout(0.5),
                                      nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                      nn.BatchNorm2d(256),
                                      nn.ReLU(),
                                      nn.Dropout(0.1),
                                      nn.Conv2d(256, 1, kernel_size=1, stride=1))

    def forward(self, r5, r3_targ, r2_targ, f_mask_r2):
        """r5 (1/16,2048), r3 (1/8,512), r2 (1/4,256), f_mask_r2 (1/4,1)
        -> logits (H,W) and m2 (1/4, 2*mdim)."""
        aspp_out = self.aspp_decoder(r5)  # 1/16 mdim
        aspp_out = F.interpolate(aspp_out, scale_factor=4, mode='bilinear',align_corners=True)  # 1/4 mdim
        m4 = self.convG0(F.relu(r5))  # out: 1/16, mdim
        m4 = self.convG1(F.relu(m4))  # out: 1/16, mdim
        m4 = self.convG2(F.relu(m4))  # out: 1/16, mdim
        m3 = self.RF3(r3_targ, m4)  # out: 1/8, mdim
        # Inject the transferred mask prior at 1/8 resolution.
        m3 = m3 + 0.5 * F.interpolate(f_mask_r2, scale_factor=0.5, mode='bilinear',align_corners=True)
        m2 = self.RF2(r2_targ, m3)  # out: 1/4, mdim
        # ... and again at 1/4 resolution.
        m2 = m2 + 0.5 * f_mask_r2
        m2 = torch.cat((m2, aspp_out), dim=1)  # out: 1/4, mdim*2
        x = self.lastconv(m2)
        x = F.interpolate(x, scale_factor=4, mode='bilinear', align_corners=True)
        return x, m2
class ConverterEncoder(nn.Module):
    """Indicator encoder.

    Downsamples the A-Net decoder feature m2 (1/4, 512) to stride 16 via two
    SE bottlenecks and fuses it with the encoder feature r5 (1/16, 2048)
    into a single 2048-channel memory feature.
    """

    def __init__(self):
        super(ConverterEncoder, self).__init__()
        # [1/4, 512] -> [1/8, 1024]
        shortcut1 = nn.Sequential(nn.Conv2d(512, 1024, kernel_size=1, stride=2, bias=False),
                                  nn.BatchNorm2d(1024),
                                  )
        self.block1 = SEBottleneck(512, 256, stride=2, downsample=shortcut1)
        # [1/8, 1024] -> [1/16, 2048]
        shortcut2 = nn.Sequential(nn.Conv2d(1024, 2048, kernel_size=1, stride=2, bias=False),
                                  nn.BatchNorm2d(2048),
                                  )
        self.block2 = SEBottleneck(1024, 512, stride=2, downsample=shortcut2)
        self.conv1x1 = nn.Conv2d(2048 * 2, 2048, kernel_size=1, padding=0)  # 1/16, 2048

    def forward(self, r5, m2):
        """r5: (B, 2048, H/16, W/16); m2: (B, 512, H/4, W/4) -> (B, 2048, H/16, W/16)."""
        compressed = self.block2(self.block1(m2))
        return self.conv1x1(torch.cat((compressed, r5), dim=1))
class SEBottleneck(nn.Module):
    """Squeeze-and-Excitation bottleneck residual block (channel expansion 4).

    conv1x1 -> conv3x3 (stride/dilation) -> conv1x1, each with BN; the output
    is channel-gated by an SE branch before the residual add and final ReLU.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=nn.BatchNorm2d):
        super(SEBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = BatchNorm(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Squeeze-and-excitation gate: global pool -> bottleneck 1x1 convs -> sigmoid.
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_down = nn.Conv2d(
            planes * 4, planes // 4, kernel_size=1, bias=False)
        self.conv_up = nn.Conv2d(
            planes // 4, planes * 4, kernel_size=1, bias=False)
        self.sig = nn.Sigmoid()
        self.downsample = downsample  # optional projection for the identity path
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        identity = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Per-channel gate in (0, 1).
        gate = self.sig(self.conv_up(self.relu(self.conv_down(self.global_pool(y)))))
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(gate * y + identity)
class SELayer(nn.Module):
    """Standalone squeeze-and-excitation channel-attention layer.

    Globally pools each channel, passes the vector through a two-layer
    bottleneck MLP with sigmoid output, and rescales the input channels.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        weights = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * weights.expand_as(x)
class Refine(nn.Module):
    """Skip-refinement block: projects a skip feature, adds the upsampled
    coarse feature, and applies a small residual conv stack at each step."""

    def __init__(self, inplanes, planes, scale_factor=2):
        super(Refine, self).__init__()
        # Skip-path projection + residual pair.
        self.convFS1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1)
        self.convFS2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.convFS3 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        # Merged-path residual pair.
        self.convMM1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.convMM2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.scale_factor = scale_factor

    def forward(self, f, pm):
        """f: skip feature; pm: coarser feature to be upsampled by scale_factor."""
        skip = self.convFS1(f)
        refined = self.convFS3(F.relu(self.convFS2(F.relu(skip))))
        skip = skip + refined
        merged = skip + F.interpolate(pm, scale_factor=self.scale_factor, mode='bilinear', align_corners=True)
        delta = self.convMM2(F.relu(self.convMM1(F.relu(merged))))
        return merged + delta
| 16,091 | 38.153285 | 144 | py |
IVOS-ATNet | IVOS-ATNet-master/networks/__init__.py | # | 1 | 1 | 1 | py |
IVOS-ATNet | IVOS-ATNet-master/networks/deeplab/aspp.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class _ASPPModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm, pretrained):
super(_ASPPModule, self).__init__()
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm(planes)
self.relu = nn.ReLU()
self._init_weight(pretrained)
def forward(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self,pretrained):
for m in self.modules():
if pretrained:
break
else:
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head.

    Four parallel atrous branches plus an image-level pooling branch,
    concatenated and fused by a 1x1 conv + BN + ReLU + dropout (output 256ch).
    """

    def __init__(self, backbone, output_stride, BatchNorm, pretrained):
        super(ASPP, self).__init__()
        # Channel width of the backbone's last feature map.
        if backbone == 'drn':
            inplanes = 512
        elif backbone == 'mobilenet':
            inplanes = 320
        else:
            inplanes = 2048
        # Dilation rates scale with the output stride.
        if output_stride == 16:
            dilations = [1, 6, 12, 18]
        elif output_stride == 8:
            dilations = [1, 12, 24, 36]
        else:
            raise NotImplementedError
        self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm, pretrained=pretrained)
        self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm, pretrained=pretrained)
        self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm, pretrained=pretrained)
        self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm, pretrained=pretrained)
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
                                             BatchNorm(256),
                                             nn.ReLU())
        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = BatchNorm(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self._init_weight(pretrained)

    def forward(self, x):
        branch1 = self.aspp1(x)
        branch2 = self.aspp2(x)
        branch3 = self.aspp3(x)
        branch4 = self.aspp4(x)
        # Image-level context, upsampled back to the branch resolution.
        pooled = self.global_avg_pool(x)
        pooled = F.interpolate(pooled, size=branch4.size()[2:], mode='bilinear', align_corners=True)
        fused = torch.cat((branch1, branch2, branch3, branch4, pooled), dim=1)
        fused = self.relu(self.bn1(self.conv1(fused)))
        return self.dropout(fused)

    def _init_weight(self, pretrained):
        # Pretrained weights are loaded afterwards; skip re-initialisation then.
        if pretrained:
            return
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
def build_aspp(backbone, output_stride, BatchNorm, pretrained):
    """Factory wrapper for the ASPP head, mirroring the other build_* helpers."""
    return ASPP(backbone, output_stride, BatchNorm, pretrained)
IVOS-ATNet | IVOS-ATNet-master/networks/deeplab/decoder.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Decoder(nn.Module):
    """DeepLabV3+ decoder.

    Projects the low-level backbone feature to 48 channels, concatenates it
    with the upsampled ASPP output (256 + 48 = 304 channels), and predicts
    per-class logits with a small conv head.
    """

    def __init__(self, num_classes, backbone, BatchNorm):
        super(Decoder, self).__init__()
        # Low-level feature width depends on the chosen backbone.
        if backbone == 'resnet' or backbone == 'drn':
            low_level_inplanes = 256
        elif backbone == 'xception':
            low_level_inplanes = 128
        elif backbone == 'mobilenet':
            low_level_inplanes = 24
        else:
            raise NotImplementedError
        self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = BatchNorm(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                       BatchNorm(256),
                                       nn.ReLU(),
                                       nn.Dropout(0.5),
                                       nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                       BatchNorm(256),
                                       nn.ReLU(),
                                       nn.Dropout(0.1),
                                       nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
        self._init_weight()

    def forward(self, x, low_level_feat):
        low = self.relu(self.bn1(self.conv1(low_level_feat)))
        upsampled = F.interpolate(x, size=low.size()[2:], mode='bilinear', align_corners=True)
        return self.last_conv(torch.cat((upsampled, low), dim=1))

    def _init_weight(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
def build_decoder(num_classes, backbone, BatchNorm):
    """Factory wrapper for the Decoder module.

    Fix: strips the dataset-stats residue fused onto the return line.
    """
    return Decoder(num_classes, backbone, BatchNorm)
import torch
import torch.nn as nn
import torch.nn.functional as F

from networks.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from networks.deeplab.aspp import build_aspp
from networks.deeplab.decoder import build_decoder
from networks.deeplab.backbone import build_backbone
class DeepLab(nn.Module):
    """DeepLab v3+ segmentation network: backbone -> ASPP -> decoder, with the
    logits bilinearly upsampled back to the input resolution.

    Args:
        backbone: one of 'resnet', 'xception', 'drn', 'mobilenet'.
        output_stride: input/feature resolution ratio (forced to 8 for 'drn').
        num_classes: number of output channels of the decoder.
        sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
        freeze_bn: put every BN layer into eval mode right after construction.
    """
    def __init__(self, backbone='resnet', output_stride=16, num_classes=21,
                 sync_bn=True, freeze_bn=False):
        super(DeepLab, self).__init__()
        if backbone == 'drn':
            output_stride = 8
        # idiom fix: truth-test instead of `sync_bn == True`
        if sync_bn:
            BatchNorm = SynchronizedBatchNorm2d
        else:
            BatchNorm = nn.BatchNorm2d
        self.backbone = build_backbone(backbone, output_stride, BatchNorm)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm)
        if freeze_bn:
            self.freeze_bn()

    def forward(self, input):
        """Return per-pixel class logits at the spatial size of `input`."""
        x, low_level_feat = self.backbone(input)
        x = self.aspp(x)
        x = self.decoder(x, low_level_feat)
        # bring logits back to input resolution
        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def freeze_bn(self):
        """Switch every (sync) BN layer to eval mode (frozen running stats)."""
        for m in self.modules():
            if isinstance(m, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                m.eval()

    def _lr_params(self, tops):
        """Yield trainable conv/BN parameters found under the given modules
        (shared implementation for the two lr-group accessors below)."""
        for top in tops:
            for _, m in top.named_modules():
                if isinstance(m, (nn.Conv2d, SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                    for p in m.parameters():
                        if p.requires_grad:
                            yield p

    def get_1x_lr_params(self):
        """Backbone parameters (trained at the base learning rate)."""
        return self._lr_params([self.backbone])

    def get_10x_lr_params(self):
        """ASPP + decoder parameters (trained at 10x the base rate)."""
        return self._lr_params([self.aspp, self.decoder])
if __name__ == "__main__":
    # Forward-pass smoke test on a mobilenet-backed model.
    # Fix: removed the dataset-stats residue line that followed this block.
    model = DeepLab(backbone='mobilenet', output_stride=16)
    model.eval()
    input = torch.rand(1, 3, 513, 513)
    output = model(input)
    print(output.size())
# --- IVOS-ATNet/networks/deeplab/__init__.py: empty package marker ---
import math

import torch.nn as nn
import torch.utils.model_zoo as model_zoo

from networks.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
# SE
self.global_pool = nn.AdaptiveAvgPool2d(1)
self.conv_down = nn.Conv2d(
planes * 4, planes // 4, kernel_size=1, bias=False)
self.conv_up = nn.Conv2d(
planes // 4, planes * 4, kernel_size=1, bias=False)
self.sig = nn.Sigmoid()
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out1 = self.global_pool(out)
out1 = self.conv_down(out1)
out1 = self.relu(out1)
out1 = self.conv_up(out1)
out1 = self.sig(out1)
if self.downsample is not None:
residual = self.downsample(x)
res = out1 * out + residual
res = self.relu(res)
return res
class ResNet(nn.Module):
    """Dilated ResNet backbone for DeepLab.

    forward() returns (high-level features, low-level features after layer1).
    Attribute names must stay as-is: _load_pretrained_model matches them
    against the torchvision checkpoint keys.
    """
    def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True, modelname = 'res101'):
        self.inplanes = 64
        self.modelname = modelname
        super(ResNet, self).__init__()
        # multi-grid dilation multipliers applied to layer4 (see _make_MG_unit)
        blocks = [1, 2, 4]
        # strides/dilations per stage chosen so the overall downsampling
        # factor equals output_stride
        if output_stride == 16:
            strides = [1, 2, 2, 1]
            dilations = [1, 1, 1, 2]
        elif output_stride == 8:
            strides = [1, 2, 1, 1]
            dilations = [1, 1, 2, 4]
        else:
            raise NotImplementedError

        # Modules
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BatchNorm(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)
        self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
        self._init_weight()

        if pretrained:
            self._load_pretrained_model()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        """Stack `blocks` residual blocks; the first may stride/project."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))

        return nn.Sequential(*layers)

    def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        """Multi-grid unit: block i uses dilation blocks[i] * dilation."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation,
                            downsample=downsample, BatchNorm=BatchNorm))
        self.inplanes = planes * block.expansion
        for i in range(1, len(blocks)):
            layers.append(block(self.inplanes, planes, stride=1,
                                dilation=blocks[i]*dilation, BatchNorm=BatchNorm))

        return nn.Sequential(*layers)

    def forward(self, input):
        """Return (layer4 output, layer1 output) feature maps."""
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x) #256 128 128
        low_level_feat = x
        x = self.layer2(x) #512 64 64
        x = self.layer3(x) #1024 32 32
        x = self.layer4(x) #2048 32 32
        return x, low_level_feat

    def _init_weight(self):
        # He-style init for convs; unit weight / zero bias for (sync) BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _load_pretrained_model(self):
        """Copy matching keys from the torchvision checkpoint (fc.* and any
        non-matching keys are silently skipped)."""
        if self.modelname =='res101':
            pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
        elif self.modelname == 'res50':
            pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet50-19c8e357.pth')
        elif self.modelname == 'SEres50':
            pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet50-19c8e357.pth')
        else: raise NotImplementedError
        model_dict = {}
        state_dict = self.state_dict()
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
def ResNet101(output_stride, BatchNorm, pretrained=True):
    """Constructs a ResNet-101 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm,
                  pretrained=pretrained, modelname='res101')
def ResNet50(output_stride, BatchNorm, pretrained=True):
    """Constructs a ResNet-50 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], output_stride, BatchNorm,
                  pretrained=pretrained, modelname='res50')
def SEResNet50(output_stride, BatchNorm, pretrained=True):
    """Constructs an SE-ResNet-50 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return ResNet(SEBottleneck, [3, 4, 6, 3], output_stride, BatchNorm,
                  pretrained=pretrained, modelname='SEres50')
if __name__ == "__main__":
    import torch

    # Shape smoke test of the dilated ResNet-50 backbone.
    # Fix: stripped the dataset-stats residue fused onto the last line.
    model = ResNet50(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=16)
    input = torch.rand(1, 3, 512, 512)
    output, low_level_feat = model(input)
    print(output.size())
    print(low_level_feat.size())
import math

import torch.nn as nn
import torch.utils.model_zoo as model_zoo

from networks.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
# Remote location of the official pretrained DRN checkpoints.
webroot = 'https://tigress-web.princeton.edu/~fy/drn/models/'
# Checkpoint URL per architecture name.
# NOTE(review): drn_d_24() and drn_d_40() below look up 'drn-d-24' and
# 'drn-d-40', which have no entry here, so pretrained=True for those two
# variants raises KeyError.
model_urls = {
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'drn-c-26': webroot + 'drn_c_26-ddedf421.pth',
    'drn-c-42': webroot + 'drn_c_42-9d336e8c.pth',
    'drn-c-58': webroot + 'drn_c_58-0a53a92c.pth',
    'drn-d-22': webroot + 'drn_d_22-4bd2f8ea.pth',
    'drn-d-38': webroot + 'drn_d_38-eebb45f0.pth',
    'drn-d-54': webroot + 'drn_d_54-0e0534ff.pth',
    'drn-d-105': webroot + 'drn_d_105-12b40979.pth'
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
    """3x3 convolution without bias (the following BN supplies the shift)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=padding,
                     dilation=dilation, bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block. `dilation` is a pair (one value per conv);
    `residual=False` disables the skip addition (used by the DRN-C
    de-gridding stages) while keeping the final ReLU."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 dilation=(1, 1), residual=True, BatchNorm=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride,
                             padding=dilation[0], dilation=dilation[0])
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes,
                             padding=dilation[1], dilation=dilation[1])
        self.bn2 = BatchNorm(planes)
        self.downsample = downsample
        self.stride = stride
        self.residual = residual
    def forward(self, x):
        # the projection shortcut is always evaluated when present
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.residual:
            y = y + shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=(1, 1), residual=True, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation[1], bias=False,
dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class DRN(nn.Module):
    """Dilated Residual Network (arch 'C' or 'D').

    forward() returns (final features, low-level features after layer3).
    Attribute/layer names must stay as-is so pretrained checkpoints match.
    """
    def __init__(self, block, layers, arch='D',
                 channels=(16, 32, 64, 128, 256, 512, 512, 512),
                 BatchNorm=None):
        super(DRN, self).__init__()
        self.inplanes = channels[0]
        self.out_dim = channels[-1]
        self.arch = arch

        # Stem differs by arch: 'C' uses residual BasicBlocks,
        # 'D' uses plain conv stacks (_make_conv_layers).
        if arch == 'C':
            self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                                   padding=3, bias=False)
            self.bn1 = BatchNorm(channels[0])
            self.relu = nn.ReLU(inplace=True)

            self.layer1 = self._make_layer(
                BasicBlock, channels[0], layers[0], stride=1, BatchNorm=BatchNorm)
            self.layer2 = self._make_layer(
                BasicBlock, channels[1], layers[1], stride=2, BatchNorm=BatchNorm)

        elif arch == 'D':
            self.layer0 = nn.Sequential(
                nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,
                          bias=False),
                BatchNorm(channels[0]),
                nn.ReLU(inplace=True)
            )

            self.layer1 = self._make_conv_layers(
                channels[0], layers[0], stride=1, BatchNorm=BatchNorm)
            self.layer2 = self._make_conv_layers(
                channels[1], layers[1], stride=2, BatchNorm=BatchNorm)

        # Striding stops after layer4; layers 5/6 use dilation 2/4 instead.
        self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2, BatchNorm=BatchNorm)
        self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2, BatchNorm=BatchNorm)
        self.layer5 = self._make_layer(block, channels[4], layers[4],
                                       dilation=2, new_level=False, BatchNorm=BatchNorm)
        self.layer6 = None if layers[5] == 0 else \
            self._make_layer(block, channels[5], layers[5], dilation=4,
                             new_level=False, BatchNorm=BatchNorm)

        # De-gridding tail: 'C' uses non-residual BasicBlocks, 'D' plain convs.
        if arch == 'C':
            self.layer7 = None if layers[6] == 0 else \
                self._make_layer(BasicBlock, channels[6], layers[6], dilation=2,
                                 new_level=False, residual=False, BatchNorm=BatchNorm)
            self.layer8 = None if layers[7] == 0 else \
                self._make_layer(BasicBlock, channels[7], layers[7], dilation=1,
                                 new_level=False, residual=False, BatchNorm=BatchNorm)
        elif arch == 'D':
            self.layer7 = None if layers[6] == 0 else \
                self._make_conv_layers(channels[6], layers[6], dilation=2, BatchNorm=BatchNorm)
            self.layer8 = None if layers[7] == 0 else \
                self._make_conv_layers(channels[7], layers[7], dilation=1, BatchNorm=BatchNorm)

        self._init_weight()

    def _init_weight(self):
        # He-style init for convs; unit weight / zero bias for (sync) BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
                    new_level=True, residual=True, BatchNorm=None):
        """Stack residual blocks; the first block's dilation is halved when
        entering a new dilation level (new_level=True)."""
        assert dilation == 1 or dilation % 2 == 0
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = list()
        layers.append(block(
            self.inplanes, planes, stride, downsample,
            dilation=(1, 1) if dilation == 1 else (
                dilation // 2 if new_level else dilation, dilation),
            residual=residual, BatchNorm=BatchNorm))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, residual=residual,
                                dilation=(dilation, dilation), BatchNorm=BatchNorm))

        return nn.Sequential(*layers)

    def _make_conv_layers(self, channels, convs, stride=1, dilation=1, BatchNorm=None):
        """Plain (non-residual) conv-BN-ReLU stack; only the first conv strides."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(self.inplanes, channels, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                BatchNorm(channels),
                nn.ReLU(inplace=True)])
            self.inplanes = channels
        return nn.Sequential(*modules)

    def forward(self, x):
        """Return (final feature map, feature map after layer3)."""
        if self.arch == 'C':
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
        elif self.arch == 'D':
            x = self.layer0(x)

        x = self.layer1(x)
        x = self.layer2(x)

        x = self.layer3(x)
        low_level_feat = x

        x = self.layer4(x)
        x = self.layer5(x)

        if self.layer6 is not None:
            x = self.layer6(x)

        if self.layer7 is not None:
            x = self.layer7(x)

        if self.layer8 is not None:
            x = self.layer8(x)

        return x, low_level_feat
class DRN_A(nn.Module):
    """DRN-A: a standard ResNet stem with layers 3/4 dilated (2/4) instead
    of strided.

    NOTE(review): unlike DRN, forward() returns a single tensor — callers
    expecting an (x, low_level_feat) pair will fail.
    """
    def __init__(self, block, layers, BatchNorm=None):
        self.inplanes = 64
        super(DRN_A, self).__init__()
        self.out_dim = 512 * block.expansion
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = BatchNorm(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, BatchNorm=BatchNorm)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                       dilation=2, BatchNorm=BatchNorm)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                       dilation=4, BatchNorm=BatchNorm)

        self._init_weight()

    def _init_weight(self):
        # He-style init for convs; unit weight / zero bias for (sync) BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        # NOTE(review): the first block is built without a dilation argument,
        # so it uses the block's default dilation even in dilated stages.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, BatchNorm=BatchNorm))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,
                                dilation=(dilation, dilation, ), BatchNorm=BatchNorm))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the final feature map (single tensor, no low-level pair)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        return x
def drn_a_50(BatchNorm, pretrained=True):
    """Build DRN-A-50 (ResNet-50 layout with dilated stages 3/4)."""
    net = DRN_A(Bottleneck, [3, 4, 6, 3], BatchNorm=BatchNorm)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
def drn_c_26(BatchNorm, pretrained=True):
    """Build DRN-C-26; optionally load ImageNet weights (classifier dropped)."""
    net = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', BatchNorm=BatchNorm)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['drn-c-26'])
    # the ImageNet fc head does not exist in this backbone
    for key in ('fc.weight', 'fc.bias'):
        del weights[key]
    net.load_state_dict(weights)
    return net
def drn_c_42(BatchNorm, pretrained=True):
    """Build DRN-C-42; optionally load ImageNet weights (classifier dropped)."""
    net = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', BatchNorm=BatchNorm)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['drn-c-42'])
    for key in ('fc.weight', 'fc.bias'):
        del weights[key]
    net.load_state_dict(weights)
    return net
def drn_c_58(BatchNorm, pretrained=True):
    """Build DRN-C-58; optionally load ImageNet weights (classifier dropped)."""
    net = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', BatchNorm=BatchNorm)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['drn-c-58'])
    for key in ('fc.weight', 'fc.bias'):
        del weights[key]
    net.load_state_dict(weights)
    return net
def drn_d_22(BatchNorm, pretrained=True):
    """Build DRN-D-22; optionally load ImageNet weights (classifier dropped)."""
    net = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', BatchNorm=BatchNorm)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['drn-d-22'])
    for key in ('fc.weight', 'fc.bias'):
        del weights[key]
    net.load_state_dict(weights)
    return net
def drn_d_24(BatchNorm, pretrained=True):
    """Build DRN-D-24.

    Fix: `model_urls` has no 'drn-d-24' entry, so the old bare dict lookup
    raised an opaque KeyError; fail fast with an explicit message instead
    (still a KeyError, so callers catching it keep working).
    """
    model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        if 'drn-d-24' not in model_urls:
            raise KeyError("model_urls has no entry 'drn-d-24'; no pretrained "
                           "checkpoint is configured for drn_d_24")
        pretrained = model_zoo.load_url(model_urls['drn-d-24'])
        del pretrained['fc.weight']
        del pretrained['fc.bias']
        model.load_state_dict(pretrained)
    return model
def drn_d_38(BatchNorm, pretrained=True):
    """Build DRN-D-38; optionally load ImageNet weights (classifier dropped)."""
    net = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['drn-d-38'])
    for key in ('fc.weight', 'fc.bias'):
        del weights[key]
    net.load_state_dict(weights)
    return net
def drn_d_40(BatchNorm, pretrained=True):
    """Build DRN-D-40.

    Fix: `model_urls` has no 'drn-d-40' entry, so the old bare dict lookup
    raised an opaque KeyError; fail fast with an explicit message instead
    (still a KeyError, so callers catching it keep working).
    """
    model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        if 'drn-d-40' not in model_urls:
            raise KeyError("model_urls has no entry 'drn-d-40'; no pretrained "
                           "checkpoint is configured for drn_d_40")
        pretrained = model_zoo.load_url(model_urls['drn-d-40'])
        del pretrained['fc.weight']
        del pretrained['fc.bias']
        model.load_state_dict(pretrained)
    return model
def drn_d_54(BatchNorm, pretrained=True):
    """Build DRN-D-54; optionally load ImageNet weights (classifier dropped)."""
    net = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['drn-d-54'])
    for key in ('fc.weight', 'fc.bias'):
        del weights[key]
    net.load_state_dict(weights)
    return net
def drn_d_105(BatchNorm, pretrained=True):
    """Build DRN-D-105; optionally load ImageNet weights (classifier dropped)."""
    net = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['drn-d-105'])
    for key in ('fc.weight', 'fc.bias'):
        del weights[key]
    net.load_state_dict(weights)
    return net
if __name__ == "__main__":
    import torch

    # Smoke test.
    # Fix: DRN_A.forward returns a single tensor (no low-level feature map),
    # so the old `output, low_level_feat = model(input)` unpack raised a
    # ValueError; also stripped the dataset-stats residue line.
    model = drn_a_50(BatchNorm=nn.BatchNorm2d, pretrained=True)
    input = torch.rand(1, 3, 512, 512)
    output = model(input)
    print(output.size())
from networks.deeplab.backbone import resnet, xception, drn, mobilenet


def build_backbone(backbone, output_stride, BatchNorm):
    """Instantiate the requested backbone network.

    Fix: strips the dataset header/stats residue fused onto this module and
    gives the NotImplementedError an explanatory message (same type).
    """
    if backbone == 'resnet':
        return resnet.ResNet101(output_stride, BatchNorm)
    elif backbone == 'xception':
        return xception.AlignedXception(output_stride, BatchNorm)
    elif backbone == 'drn':
        # drn_d_54 fixes its own dilation pattern; output_stride is implicit
        return drn.drn_d_54(BatchNorm)
    elif backbone == 'mobilenet':
        return mobilenet.MobileNetV2(output_stride, BatchNorm)
    else:
        raise NotImplementedError('unsupported backbone: ' + str(backbone))
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo

from networks.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
def fixed_padding(inputs, kernel_size, dilation):
    """Zero-pad `inputs` so a stride-1 conv with this kernel/dilation keeps
    the spatial size (TF 'SAME' style: extra pixel on the right/bottom when
    the total padding is odd)."""
    effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    total = effective - 1
    left = total // 2
    right = total - left
    return F.pad(inputs, (left, right, left, right))
class SeparableConv2d(nn.Module):
    """Depthwise conv (groups == in channels) + BN + 1x1 pointwise conv,
    with 'SAME'-style padding applied manually via fixed_padding."""
    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, BatchNorm=None):
        super(SeparableConv2d, self).__init__()
        # depthwise: one filter per input channel, no built-in padding
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
                               groups=inplanes, bias=bias)
        self.bn = BatchNorm(inplanes)
        # pointwise 1x1 mixes channels
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
    def forward(self, x):
        padded = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
        out = self.bn(self.conv1(padded))
        return self.pointwise(out)
class Block(nn.Module):
    """Xception block: a chain of ReLU + SeparableConv2d + BN units plus a
    (possibly projected) skip connection.

    Flags: grow_first widens channels in the first conv instead of the last;
    start_with_relu drops the leading ReLU; is_last appends a stride-1 tail
    conv when stride == 1. The list-append order below defines the
    state_dict keys and must not change.
    """
    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, BatchNorm=None,
                 start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()

        # projected skip whenever channels or resolution change
        if planes != inplanes or stride != 1:
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = BatchNorm(planes)
        else:
            self.skip = None

        self.relu = nn.ReLU(inplace=True)
        rep = []

        filters = inplanes
        if grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))
            filters = planes

        for i in range(reps - 1):
            rep.append(self.relu)
            rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(filters))

        if not grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))

        if stride != 1:
            rep.append(self.relu)
            rep.append(SeparableConv2d(planes, planes, 3, 2, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))

        if stride == 1 and is_last:
            rep.append(self.relu)
            rep.append(SeparableConv2d(planes, planes, 3, 1, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))

        if not start_with_relu:
            rep = rep[1:]
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.rep(inp)

        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp

        x = x + skip

        return x
class AlignedXception(nn.Module):
    """Modified Aligned Xception backbone for DeepLab.

    forward() returns (high-level features, low-level features after block1).

    Fixes:
    * _load_pretrained_model filtered pretrained keys with
      `if k in model_dict:` where `model_dict` starts empty, so the condition
      was always False and NO pretrained weight was ever copied. The filter
      now checks the model's own state_dict, matching the ResNet backbone.
    * The sixteen identical middle-flow blocks (block4..block19) are built
      and run in loops via setattr/getattr — attribute names are unchanged,
      so checkpoints still match.
    """
    def __init__(self, output_stride, BatchNorm,
                 pretrained=True):
        super(AlignedXception, self).__init__()

        # stride/dilation schedule for the requested output stride
        if output_stride == 16:
            entry_block3_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError

        # Entry flow
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = BatchNorm(32)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = BatchNorm(64)

        self.block1 = Block(64, 128, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False)
        self.block2 = Block(128, 256, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False,
                            grow_first=True)
        self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, BatchNorm=BatchNorm,
                            start_with_relu=True, grow_first=True, is_last=True)

        # Middle flow: 16 identical residual blocks (block4 .. block19)
        for i in range(4, 20):
            setattr(self, 'block%d' % i,
                    Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                          BatchNorm=BatchNorm, start_with_relu=True, grow_first=True))

        # Exit flow
        self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=False, is_last=True)

        self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn3 = BatchNorm(1536)

        self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn4 = BatchNorm(1536)

        self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn5 = BatchNorm(2048)

        # Init weights
        self._init_weight()

        # Load pretrained model
        if pretrained:
            self._load_pretrained_model()

    def forward(self, x):
        # Entry flow
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.block1(x)
        # add relu here
        x = self.relu(x)
        low_level_feat = x
        x = self.block2(x)
        x = self.block3(x)

        # Middle flow
        for i in range(4, 20):
            x = getattr(self, 'block%d' % i)(x)

        # Exit flow
        x = self.block20(x)
        x = self.relu(x)
        x = self.relu(self.bn3(self.conv3(x)))
        x = self.relu(self.bn4(self.conv4(x)))
        x = self.relu(self.bn5(self.conv5(x)))

        return x, low_level_feat

    def _init_weight(self):
        # He-style init for convs; unit weight / zero bias for (sync) BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _load_pretrained_model(self):
        """Map the ImageNet Xception checkpoint onto this deeper variant."""
        pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
        model_dict = {}
        state_dict = self.state_dict()

        for k, v in pretrain_dict.items():
            # BUGFIX: was `if k in model_dict:` (always False on the empty
            # dict), which silently skipped every pretrained weight.
            if k in state_dict:
                if 'pointwise' in k:
                    v = v.unsqueeze(-1).unsqueeze(-1)
                if k.startswith('block11'):
                    # replicate the single ImageNet middle block across
                    # block11..block19 of this deeper middle flow
                    model_dict[k] = v
                    for i in range(12, 20):
                        model_dict[k.replace('block11', 'block%d' % i)] = v
                elif k.startswith('block12'):
                    model_dict[k.replace('block12', 'block20')] = v
                elif k.startswith('bn3'):
                    model_dict[k] = v
                    model_dict[k.replace('bn3', 'bn4')] = v
                elif k.startswith('conv4'):
                    model_dict[k.replace('conv4', 'conv5')] = v
                elif k.startswith('bn4'):
                    model_dict[k.replace('bn4', 'bn5')] = v
                else:
                    model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
if __name__ == "__main__":
    import torch

    # Shape smoke test of the Xception backbone.
    # Fix: stripped the dataset-stats residue fused onto the last line.
    model = AlignedXception(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=16)
    input = torch.rand(1, 3, 512, 512)
    output, low_level_feat = model(input)
    print(output.size())
    print(low_level_feat.size())
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo

from networks.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
def conv_bn(inp, oup, stride, BatchNorm):
    """3x3 conv (padding 1, no bias) -> BN -> ReLU6: the MobileNetV2 stem."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        BatchNorm(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
def fixed_padding(inputs, kernel_size, dilation):
    """Pad `inputs` so a conv with this kernel/dilation keeps 'same' size.

    The total pad equals the effective kernel extent minus one, split so that
    the extra pixel (odd totals) goes to the bottom/right.
    """
    effective = (kernel_size - 1) * dilation + 1
    total = effective - 1
    begin = total // 2
    end = total - begin
    return F.pad(inputs, (begin, end, begin, end))
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: (expand ->) depthwise -> project.

    Spatial padding is NOT done by the depthwise conv itself; `forward`
    applies `fixed_padding` first so dilated variants keep 'same' size.
    """

    def __init__(self, inp, oup, stride, dilation, expand_ratio, BatchNorm):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        self.kernel_size = 3
        self.dilation = dilation
        # Residual connection only when spatial size and channels are kept.
        self.use_res_connect = self.stride == 1 and inp == oup

        expanded = round(inp * expand_ratio)
        layers = []
        if expand_ratio != 1:
            # pw: 1x1 expansion conv
            layers += [
                nn.Conv2d(inp, expanded, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
                BatchNorm(expanded),
                nn.ReLU6(inplace=True),
            ]
        layers += [
            # dw: 3x3 depthwise conv (padding handled in forward)
            nn.Conv2d(expanded, expanded, 3, stride, 0, dilation, groups=expanded, bias=False),
            BatchNorm(expanded),
            nn.ReLU6(inplace=True),
            # pw-linear: 1x1 projection, no activation
            nn.Conv2d(expanded, oup, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
            BatchNorm(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        padded = fixed_padding(x, self.kernel_size, dilation=self.dilation)
        out = self.conv(padded)
        return x + out if self.use_res_connect else out
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone producing high-level and low-level feature maps.

    `output_stride` sets the total stride at which the network stops
    down-sampling and switches to dilated convolutions, keeping spatial
    resolution while growing the receptive field (DeepLab-style backbone).
    """
    def __init__(self, output_stride=8, BatchNorm=None, width_mult=1., pretrained=True):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        current_stride = 1
        rate = 1  # accumulated dilation rate once output_stride is reached
        # Each row: t = expansion factor, c = output channels,
        # n = number of repeats, s = stride of the first repeat.
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        # building first layer
        input_channel = int(input_channel * width_mult)
        self.features = [conv_bn(3, input_channel, 2, BatchNorm)]
        current_stride *= 2
        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            if current_stride == output_stride:
                # Target stride reached: stop striding, dilate instead.
                stride = 1
                dilation = rate
                rate *= s
            else:
                stride = s
                dilation = 1
                current_stride *= s
            output_channel = int(c * width_mult)
            for i in range(n):
                if i == 0:
                    # Only the first repeat of a stage uses the stage stride.
                    self.features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm))
                else:
                    self.features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm))
                input_channel = output_channel
        self.features = nn.Sequential(*self.features)
        self._initialize_weights()
        if pretrained:
            self._load_pretrained_model()
        # First 4 blocks feed the decoder as low-level features; the rest
        # produce the high-level output.
        self.low_level_features = self.features[0:4]
        self.high_level_features = self.features[4:]
    def forward(self, x):
        """Return (high_level_features, low_level_features) for input `x`."""
        low_level_feat = self.low_level_features(x)
        x = self.high_level_features(low_level_feat)
        return x, low_level_feat
    def _load_pretrained_model(self):
        """Copy matching parameters from the ImageNet-pretrained MobileNetV2."""
        pretrain_dict = model_zoo.load_url('http://jeff95.me/models/mobilenet_v2-6a65762b.pth')
        model_dict = {}
        state_dict = self.state_dict()
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
    def _initialize_weights(self):
        """Kaiming-init conv weights; unit scale / zero bias for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                # m.weight.data.normal_(0, math.sqrt(2. / n))
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
if __name__ == "__main__":
    # Quick shape check with a dummy input batch.
    dummy = torch.rand(1, 3, 512, 512)
    net = MobileNetV2(output_stride=16, BatchNorm=nn.BatchNorm2d)
    out, low_level = net(dummy)
    print(out.size())
    print(low_level.size())
| 5,398 | 34.519737 | 110 | py |
IVOS-ATNet | IVOS-ATNet-master/networks/deeplab/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
    """Empty context object shared by the replicas of one sub-module;
    replication callbacks may attach arbitrary attributes to it."""
    pass
def execute_replication_callbacks(modules):
    """Invoke `__data_parallel_replicate__(ctx, copy_id)` on every sub-module
    of every replica in `modules`.

    All replicas are isomorphic, so sub-module j of every copy shares one
    CallbackContext through which the copies can exchange information. The
    master copy (modules[0], copy_id 0) is called before any slave copy.
    """
    num_submodules = len(list(modules[0].modules()))
    contexts = [CallbackContext() for _ in range(num_submodules)]

    for copy_id, replica in enumerate(modules):
        for ctx, submodule in zip(contexts, replica.modules()):
            if hasattr(submodule, '__data_parallel_replicate__'):
                submodule.__data_parallel_replicate__(ctx, copy_id)
class DataParallelWithCallback(DataParallel):
    """`DataParallel` that additionally runs the `__data_parallel_replicate__`
    callback on every sub-module right after replication (see
    `execute_replication_callbacks`).

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """

    def replicate(self, module, device_ids):
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
def patch_replication_callback(data_parallel):
    """Monkey-patch an existing `DataParallel` instance so that replication
    triggers `__data_parallel_replicate__` callbacks, exactly as
    `DataParallelWithCallback` does. Useful for customized `DataParallel`
    implementations.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)

    raw_replicate = data_parallel.replicate

    @functools.wraps(raw_replicate)
    def replicate_with_callbacks(module, device_ids):
        replicas = raw_replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas

    data_parallel.replicate = replicate_with_callbacks
IVOS-ATNet | IVOS-ATNet-master/networks/deeplab/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import numpy as np
from torch.autograd import Variable
def as_numpy(v):
    """Convert a tensor (or autograd Variable) to a CPU numpy array."""
    data = v.data if isinstance(v, Variable) else v
    return data.cpu().numpy()
class TorchTestCase(unittest.TestCase):
    """unittest.TestCase with a numerical-closeness assertion for tensors."""

    def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
        """Assert `a` and `b` are element-wise close within `atol`/`rtol`.

        Bug fix: `rtol` was accepted but never forwarded to `np.allclose`,
        so numpy's default rtol (1e-5) silently applied instead of the
        declared 1e-3.
        """
        npa, npb = as_numpy(a), as_numpy(b)
        self.assertTrue(
            np.allclose(npa, npb, atol=atol, rtol=rtol),
            'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
        )
| 834 | 26.833333 | 157 | py |
IVOS-ATNet | IVOS-ATNet-master/networks/deeplab/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dementions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
# Message sent from each replica to the master: per-device partial sum,
# squared sum, and the element count they were computed over.
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
# Message broadcast back by the master; despite the field name, the first
# slot carries the global mean (see _data_parallel_master).
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
    """Base class for cross-GPU synchronized batch normalization.

    In data-parallel training mode each replica computes partial sums over
    its sub-batch and exchanges them through a SyncMaster/SlavePipe pair so
    the normalization statistics cover the whole mini-batch.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        # Master-side coordinator; replicas obtain slave pipes from it during
        # replication (see __data_parallel_replicate__).
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None
    def forward(self, input):
        """Normalize `input`; synchronizes statistics across replicas when in
        data-parallel training mode."""
        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
        if not (self._is_parallel and self.training):
            return F.batch_norm(
                input, self.running_mean, self.running_var, self.weight, self.bias,
                self.training, self.momentum, self.eps)
        # Resize the input to (B, C, -1).
        input_shape = input.size()
        input = input.view(input.size(0), self.num_features, -1)
        # Compute the sum and square-sum.
        sum_size = input.size(0) * input.size(2)
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft(input ** 2)
        # Reduce-and-broadcast the statistics.
        if self._parallel_id == 0:
            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # Compute the output.
        if self.affine:
            # MJY:: Fuse the multiplication for speed.
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
        else:
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
        # Reshape it.
        return output.view(input_shape)
    def __data_parallel_replicate__(self, ctx, copy_id):
        """Replication callback: copy 0 becomes the master, other copies
        register as slaves via the shared context `ctx`."""
        self._is_parallel = True
        self._parallel_id = copy_id
        # parallel_id == 0 means master device.
        if self._parallel_id == 0:
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)
    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
        # Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to:: Tete Xiao (http://tetexiao.com/)
        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]  # flatten
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        outputs = []
        for i, rec in enumerate(intermediates):
            # Pair each original identifier with its (mean, inv_std) slice.
            outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))
        return outputs
    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device."""
        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = sum_ / size
        sumvar = ssum - sum_ * mean
        unbias_var = sumvar / (size - 1)
        bias_var = sumvar / size
        # Running stats use the unbiased variance; normalization below uses
        # the biased one, matching standard BatchNorm behaviour.
        self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
        self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        # Second return value is the inverse standard deviation.
        return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 2d or 3d input (a mini-batch
    of 1d inputs, optionally with an extra temporal dimension).

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch BatchNorm1d, the mean and standard-deviation
    are reduced across all devices during training, so the statistics are
    computed over the full mini-batch rather than each device's sub-batch.
    With one GPU or on CPU it behaves exactly like the built-in version.

    gamma and beta are learnable parameter vectors of size C (the input
    size). A running estimate of mean/variance is kept with momentum 0.1 and
    used for normalization during evaluation. Statistics are computed over
    `(N, L)` slices, so this is commonly called Temporal BatchNorm.

    Args:
        num_features: num_features from an expected input of size
            `batch_size x num_features [x width]`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer
            learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm1d(100)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100)))
    """

    def _check_input_dim(self, input):
        # Temporal batch-norm only accepts (N, C) or (N, C, L) inputs.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 4d input (a mini-batch of 2d
    inputs with an additional channel dimension), a.k.a. Spatial BatchNorm.

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch BatchNorm2d, the mean and standard-deviation
    are reduced across all devices during training, so the statistics are
    computed over the full mini-batch rather than each device's sub-batch.
    With one GPU or on CPU it behaves exactly like the built-in version.

    gamma and beta are learnable parameter vectors of size C (the input
    size). A running estimate of mean/variance is kept with momentum 0.1 and
    used for normalization during evaluation. Statistics are computed over
    `(N, H, W)` slices.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x height x width
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer
            learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm2d(100)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45)))
    """

    def _check_input_dim(self, input):
        # Spatial batch-norm only accepts (N, C, H, W) inputs.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 5d input (a mini-batch of 3d
    inputs with an additional channel dimension), a.k.a. Volumetric or
    Spatio-temporal BatchNorm.

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch BatchNorm3d, the mean and standard-deviation
    are reduced across all devices during training, so the statistics are
    computed over the full mini-batch rather than each device's sub-batch.
    With one GPU or on CPU it behaves exactly like the built-in version.

    gamma and beta are learnable parameter vectors of size C (the input
    size). A running estimate of mean/variance is kept with momentum 0.1 and
    used for normalization during evaluation. Statistics are computed over
    `(N, D, H, W)` slices.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x depth x height x width
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer
            learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm3d(100)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)))
    """

    def _check_input_dim(self, input):
        # Volumetric batch-norm only accepts (N, C, D, H, W) inputs.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
IVOS-ATNet | IVOS-ATNet-master/networks/deeplab/sync_batchnorm/comm.py | # -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
    """A thread-safe, single-slot future. Used only as a one-to-one pipe:
    one thread `put`s exactly one result, another thread `get`s it."""

    def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        # The condition shares the slot lock, so notify()/wait() below are
        # always called with the lock held, as required.
        self._cond = threading.Condition(self._lock)

    def put(self, result):
        """Deposit `result`; asserts the previous result was fetched."""
        with self._lock:
            # Typo fixed in the message ("has't" -> "hasn't").
            assert self._result is None, 'Previous result hasn\'t been fetched.'
            self._result = result
            self._cond.notify()

    def get(self):
        """Block until a result is available, then return and clear it."""
        with self._lock:
            # Fix: wait in a loop so a spurious wakeup cannot return None.
            while self._result is None:
                self._cond.wait()
            res = self._result
            self._result = None
            return res
# Per-slave registry entry held by the master: the FutureResult the master
# fills with that slave's reply.
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
# Base tuple for SlavePipe: the slave's identifier, the queue shared with the
# master, and the FutureResult it reads its reply from.
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
    """Pipe used by a slave device to exchange one message with the master."""

    def run_slave(self, msg):
        # Hand our message (tagged with our identifier) to the master, then
        # block until the master publishes our reply.
        self.queue.put((self.identifier, msg))
        reply = self.result.get()
        self.queue.put(True)  # acknowledge receipt so the master can finish
        return reply
class SyncMaster(object):
    """An abstract `SyncMaster` object.
    - During the replication, as the data parallel will trigger an callback of each module, all slave devices should
    call `register(id)` and obtain an `SlavePipe` to communicate with the master.
    - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
    and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine to message passed
    back to each slave devices.
    """
    def __init__(self, master_callback):
        """
        Args:
            master_callback: a callback to be invoked after having collected messages from slave devices.
        """
        self._master_callback = master_callback
        # Single queue shared by all slaves: carries (identifier, message)
        # pairs on the way in and `True` acknowledgements on the way back.
        self._queue = queue.Queue()
        self._registry = collections.OrderedDict()
        self._activated = False
    def __getstate__(self):
        # Only the callback is pickled; queue/registry are rebuilt on load.
        return {'master_callback': self._master_callback}
    def __setstate__(self, state):
        self.__init__(state['master_callback'])
    def register_slave(self, identifier):
        """
        Register an slave device.
        Args:
            identifier: an identifier, usually is the device id.
        Returns: a `SlavePipe` object which can be used to communicate with the master device.
        """
        if self._activated:
            # A fresh registration round invalidates the previous one.
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)
    def run_master(self, master_msg):
        """
        Main entry for the master device in each forward pass.
        The messages were first collected from each devices (including the master device), and then
        an callback will be invoked to compute the message to be sent back to each devices
        (including the master device).
        Args:
            master_msg: the message that the master want to send to itself. This will be placed as the first
            message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
        Returns: the message to be sent back to the master device.
        """
        self._activated = True
        # Collect one message per registered slave; the master's is first.
        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())
        results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belongs to the master.'
        # Publish each slave's result through its FutureResult pipe.
        for i, res in results:
            if i == 0:
                continue
            self._registry[i].result.put(res)
        # Wait for every slave's acknowledgement (they put True back).
        for i in range(self.nr_slaves):
            assert self._queue.get() is True
        return results[0][1]
    @property
    def nr_slaves(self):
        # Number of currently registered slave devices.
        return len(self._registry)
| 4,440 | 33.161538 | 117 | py |
IVOS-ATNet | IVOS-ATNet-master/networks/deeplab/sync_batchnorm/__init__.py | # -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .replicate import DataParallelWithCallback, patch_replication_callback | 447 | 36.333333 | 96 | py |
IVOS-ATNet | IVOS-ATNet-master/libs/custom_transforms.py | import numpy as np
import torch
class Normalize_ApplymeanvarImage(object):
    """Scale 'image' entries of a sample dict to [0, 1], then normalize with
    the given per-channel mean and variance: (img / 255 - mean) / var."""

    def __init__(self, mean, var, change_channels=False):
        self.mean = mean
        self.var = var
        # When True, swap the channel order (BGR <-> RGB) before normalizing.
        self.change_channels = change_channels

    def __call__(self, sample):
        mean = np.array(self.mean, dtype=np.float32)
        var = np.array(self.var, dtype=np.float32)
        for key in sample:
            if 'image' not in key:
                continue
            img = sample[key]
            if self.change_channels:
                img = img[:, :, [2, 1, 0]]
            img = img.astype(np.float32) / 255.0
            sample[key] = np.subtract(img, mean) / var
        return sample

    def __str__(self):
        # NOTE(review): label predates the class rename; kept unchanged for
        # backward-compatible string output.
        return 'SubtractMeanImage'+str(self.mean)
class ToTensor(object):
    """Convert every ndarray in the sample dict to a CHW torch tensor."""

    def __call__(self, sample):
        for key in sample:
            if 'meta' in key:
                continue  # metadata entries are left untouched
            arr = sample[key]
            if arr.ndim == 2:
                # Promote H x W to H x W x 1 so the transpose below works.
                arr = arr[:, :, np.newaxis]
            # numpy image layout is H x W x C; torch expects C x H x W.
            sample[key] = torch.from_numpy(arr.transpose((2, 0, 1)))
        return sample
| 1,272 | 25.520833 | 132 | py |
IVOS-ATNet | IVOS-ATNet-master/libs/utils.py | import os
import numpy as np
import cv2
from davisinteractive.utils.operations import bresenham
def mkdir(paths):
    """Create each directory in `paths` (a single path or a list/tuple of
    paths), including parents; existing directories are left untouched."""
    if not isinstance(paths, (list, tuple)):
        paths = [paths]
    for p in paths:
        if not os.path.isdir(p):
            os.makedirs(p)
class logger:
    """Tiny helper that echoes a message to stdout and appends it to a file."""

    def __init__(self, log_file):
        self.log_file = log_file

    def printNlog(self, str2print):
        print(str2print)
        # The with-block closes the file; no explicit close() needed.
        with open(self.log_file, 'a') as fh:
            fh.write(str2print + '\n')
def apply_pad(img, padinfo=None):
    """Reflect-pad `img` (H x W or H x W x C).

    With `padinfo` == ((top, bottom), (left, right)) the given padding is
    applied and only the padded image is returned. Without it, the image is
    padded symmetrically up to the next multiple of 32 in each spatial dim
    and `(padded_img, ((top, bottom), (left, right)))` is returned.
    """
    if padinfo:
        hpad, wpad = padinfo
        spec = (hpad, wpad, (0, 0)) if img.ndim == 3 else (hpad, wpad)
        return np.pad(img, spec, mode='reflect')

    h, w = img.shape[:2]
    # Round each spatial dim up to the next multiple of 32 (an exact
    # multiple still grows by 32, matching the original arithmetic).
    new_h = h + 32 - h % 32
    new_w = w + 32 - w % 32
    top = (new_h - h) // 2
    bottom = (new_h - h) - top
    left = (new_w - w) // 2
    right = (new_w - w) - left
    info = ((top, bottom), (left, right))
    if img.ndim == 3:
        padded = np.pad(img, (*info, (0, 0)), mode='reflect')
    else:
        padded = np.pad(img, info, mode='reflect')
    return padded, info
def get_prop_list(annotated_frames, annotated_now, num_frames, proportion = 1.0, get_close_anno_frames = False):
    """Build the list of frames to propagate the current annotation to.

    The propagation range runs outward from `annotated_now` in both
    directions and is clipped at the closest previously-annotated frame on
    each side (exclusive) or at the sequence boundary.

    Args:
        annotated_frames: frame indices annotated so far, in interaction
            order (may contain `annotated_now`).
        annotated_now: frame index annotated in the current interaction.
        num_frames: number of frames in the sequence.
        proportion: fraction of the distance toward each neighbouring
            annotated frame to actually cover (1.0 = full range).
        get_close_anno_frames: when True, also return the interaction indices
            of the annotated frames bounding the range.
            NOTE(review): this branch reverses `annotated_frames` in place —
            callers that reuse the list should be aware.

    Returns:
        prop_list, or (prop_list, close_frames_round) when
        `get_close_anno_frames` is True. `annotated_now` appears twice in
        prop_list, once as the start of each propagation direction.
    """
    aligned_anno = sorted(annotated_frames)
    # Drop every occurrence of the current frame so it cannot bound itself.
    overlap = aligned_anno.count(annotated_now)
    for i in range(overlap):
        aligned_anno.remove(annotated_now)
    start_frame, end_frame = 0, num_frames -1
    # Closest annotated frame to the right bounds the range (exclusive).
    for i in range(len(aligned_anno)):
        if aligned_anno[i] > annotated_now:
            end_frame = aligned_anno[i] - 1
            break
    aligned_anno.reverse()
    # Closest annotated frame to the left bounds the range (exclusive).
    for i in range(len(aligned_anno)):
        if aligned_anno[i] < annotated_now:
            start_frame = aligned_anno[i]+1
            break
    if get_close_anno_frames:
        close_frames_round=dict() # 1st column: iaction idx, 2nd column: the close frames
        annotated_frames.reverse()
        # Interaction index of the bounding annotated frames.
        # NOTE(review): the left lookup uses start_frame-1 (the annotated
        # frame itself) while the right lookup searches end_frame rather than
        # end_frame+1 — possibly off by one; confirm against callers.
        try: close_frames_round["left"] = len(annotated_frames) - annotated_frames.index(start_frame-1) - 1
        except: print('No left annotated fr')
        try: close_frames_round["right"] = len(annotated_frames) - annotated_frames.index(end_frame) - 1
        except: print('No right annotated fr')
    if proportion != 1.0:
        # Shrink the range toward annotated_now unless it already touches the
        # sequence boundary.
        if start_frame!=0:
            start_frame = annotated_now - int((annotated_now-start_frame)*proportion + 0.5)
        if end_frame != num_frames-1:
            end_frame = annotated_now + int((end_frame - annotated_now) * proportion + 0.5)
    prop_list = list(range(annotated_now,start_frame-1,-1)) + list(range(annotated_now,end_frame+1))
    if len(prop_list)==0:
        prop_list = [annotated_now]
    if not get_close_anno_frames:
        return prop_list
    else:
        return prop_list, close_frames_round
def scribble_to_image(scribbles, currentframe, obj_id, prev_mask, dilation=5,
                      nocare_area=None, bresenhamtf=True, blur=True, singleimg=False, seperate_pos_neg = False):
    """Rasterize user scribbles for one frame into mask-sized float maps.

    Scribble paths (normalized x/y coordinates) are drawn into a positive map
    (scribbles of `obj_id`) and a negative map (scribbles of other objects),
    optionally connecting consecutive points with Bresenham line segments,
    then dilated/blurred via `scrimg_postprocess`.

    Args:
        scribbles: per-frame list of scribble dicts, or a single frame's list
            when `singleimg` is True; each dict has 'path' and 'object_id'.
        currentframe: frame index used to select from `scribbles`.
        obj_id: object id treated as positive; 0 is not supported.
        prev_mask: previous label mask (H x W); negative responses are zeroed
            outside the object's previous region.
        dilation, nocare_area, blur: forwarded to `scrimg_postprocess`.
        bresenhamtf: connect consecutive scribble points with line segments.
        seperate_pos_neg: return (pos, neg) instead of pos - neg.

    Returns:
        float32 map(s) of shape (H, W): `pos - neg`, or `(pos, neg)` when
        `seperate_pos_neg` is True.
    """
    h,w = prev_mask.shape
    regions2exclude_on_maskneg = prev_mask!=obj_id
    mask = np.zeros([h,w])
    mask_neg = np.zeros([h,w])
    if singleimg:
        scribbles=scribbles
    else: scribbles = scribbles[currentframe]
    for scribble in scribbles:
        # Path coordinates are normalized; scale to pixel coordinates.
        # NOTE(review): np.int is removed in NumPy >= 1.24; may need int.
        points_scribble = np.round(np.array(scribble['path']) * np.array((w, h))).astype(np.int)
        if bresenhamtf and len(points_scribble) > 1:
            all_points = bresenham(points_scribble)
        else:
            all_points = points_scribble
        if obj_id==0:
            raise NotImplementedError
        else:
            if scribble['object_id'] == obj_id:
                mask[all_points[:, 1] - 1, all_points[:, 0] - 1] = 1
            else:
                mask_neg[all_points[:, 1] - 1, all_points[:, 0] - 1] = 1
        # else:
        # mask_neg[all_points[:, 1] - 1, all_points[:, 0] - 1] = 1
    scr_gt, _ = scrimg_postprocess(mask, dilation=dilation, nocare_area=nocare_area, blur=blur, blursize=(5, 5))
    scr_gt_neg, _ = scrimg_postprocess(mask_neg, dilation=dilation, nocare_area=nocare_area, blur=blur, blursize=(5, 5))
    # Negative hints only count where the previous mask contained the object.
    scr_gt_neg[regions2exclude_on_maskneg] = 0
    if seperate_pos_neg:
        return scr_gt.astype(np.float32), scr_gt_neg.astype(np.float32)
    else:
        scr_img = scr_gt - scr_gt_neg
        return scr_img.astype(np.float32)
def scrimg_postprocess(scr, dilation=7, nocare_area=21, blur = False, blursize=(5, 5), var = 6.0, custom_blur = None):
    """Dilate a binary scribble map and optionally blur it.

    Returns `(fg, nocare)`: `fg` is the dilated (and possibly blurred)
    foreground map, `nocare` is the ring `nocare_area` pixels wide around it
    (or None when `nocare_area` is None).
    """
    # Dilate the scribble into a thicker foreground region. A map whose max
    # is not exactly 1 is assumed to be already processed and left as-is.
    if scr.max() == 1:
        se_fg = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilation, dilation))
        fg = cv2.dilate(scr.astype(np.uint8), kernel=se_fg).astype(scr.dtype)
    else:
        fg = scr

    # No-care ring: the extra dilation band around the foreground.
    if nocare_area is None:
        nocare = None
    else:
        se_nc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (nocare_area, nocare_area))
        nocare = cv2.dilate(fg, kernel=se_nc) - fg

    if blur:
        fg = cv2.GaussianBlur(fg, ksize=blursize, sigmaX=var)
    elif custom_blur:
        kern = np.array([[1, 2, 3, 2, 1],
                         [2, 4, 9, 4, 2],
                         [3, 9, 64, 9, 3],
                         [2, 4, 9, 4, 2],
                         [1, 2, 3, 2, 1]])
        fg = cv2.filter2D(fg, ddepth=-1, kernel=kern / np.sum(kern))
    return fg, nocare
| 5,600 | 35.37013 | 120 | py |
IVOS-ATNet | IVOS-ATNet-master/libs/__init__.py | # | 1 | 1 | 1 | py |
IVOS-ATNet | IVOS-ATNet-master/libs/utils_torch.py | import torch
def combine_masks_with_batch(masks, n_obj, th=0.5, return_as_onehot = False):
    """Combine per-object soft masks into a single label map (or one-hot map).

    For every pixel the object with the highest probability wins, provided
    its probability also exceeds `th`; otherwise the pixel is background.

    # Arguments
        masks: Tensor with shape [B, n_obj, H, W] of per-object probabilities.
        n_obj: number of objects (must not exceed masks.size(1)).
        th: minimum probability for a pixel to be assigned to the winner.
        return_as_onehot: when False (default), return [B, 1, H, W] with
            labels 0 (background) and 1..n_obj; when True, return
            [B, n_obj, H, W] with one binary channel per object.

    # Returns
        Tensor as described above, on the same device/dtype as `masks`.
    """
    # Per-pixel winning object channel: [B, 1, H, W].
    winner = torch.argmax(masks, dim=1, keepdim=True)

    if return_as_onehot:
        out_mask = torch.zeros_like(masks)  # [B, n_obj, H, W]
        for obj_id in range(n_obj):
            obj_win = (winner == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            # Bug fix: cast with the masks' own dtype/device instead of the
            # hard-coded torch.cuda.FloatTensor, which crashed on CPU input.
            out_mask[:, obj_id] = obj_win[:, 0].to(masks.dtype)
    else:
        out_mask = torch.unsqueeze(torch.zeros_like(masks)[:, 0], 1)  # [B, 1, H, W]
        for obj_id in range(n_obj):
            obj_win = (winner == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            out_mask[obj_win] = obj_id + 1  # labels are 1-based; 0 = background
    return out_mask
| 1,315 | 34.567568 | 84 | py |
IVOS-ATNet | IVOS-ATNet-master/libs/analyze_report.py | """ Analyse Global Summary
"""
import os
import json
import matplotlib.pyplot as plt
def analyze_summary(fname, metric = 'J_AND_F'):
    """Plot the evaluation curves stored in a DAVIS-interactive summary JSON.

    Reads `fname`, prints the AUC and the metric value at the evaluation time
    threshold, and saves a two-panel figure (metric vs. accumulated time and
    metric vs. interaction count) next to the summary file.

    Args:
        fname: path to the global summary JSON file.
        metric: one of 'J', 'F' or 'J_AND_F'.
    """
    METRIC_TXT = {'J': 'J',
                  'F': 'F',
                  'J_AND_F': 'J&F',}
    with open(fname, 'r') as fp:
        summary = json.load(fp)
    print('AUC: \t{:.3f}'.format(summary['auc']))
    th = summary['metric_at_threshold']['threshold']
    met = summary['metric_at_threshold'][metric]
    print('{}@{}: \t{:.3f}'.format(METRIC_TXT[metric], th, met))
    time = summary['curve']['time']
    metric_res = summary['curve'][metric]
    iteration = list(range(len(time)))
    fig = plt.figure(figsize=(6, 8))
    fig.suptitle('[AUC/t: {:.3f}] [{}@{}: {:.3f}]'.format(summary['auc'],METRIC_TXT[metric],th, met), fontsize=16)
    # Top panel: metric as a function of accumulated annotation time.
    ax1 = fig.add_subplot(211)
    ax1.plot(time, metric_res)
    ax1.plot(time, metric_res,'b.')
    # ax1.set_title('[AUC/t: {:.3f}] [J@{}: {:.3f}]'.format(summary['auc'],th, jac) )
    ax1.set_ylim([0, 1])
    ax1.set_xlim([0, max(time)])
    ax1.set_xlabel('Accumulated Time (s)')
    ax1.set_ylabel(r'$\mathcal{' + METRIC_TXT[metric] + '}$')
    ax1.axvline(th, c='r')  # mark the evaluation time threshold
    ax1.yaxis.grid(True)
    # Bottom panel: metric as a function of the interaction index.
    ax2 = fig.add_subplot(212)
    ax2.plot(iteration, metric_res)
    ax2.plot(iteration, metric_res,'b.')
    ax2.set_ylim([0, 1])
    ax2.set_xlim([0, len(time)-1])
    ax2.set_xlabel('Interactions (n)')
    ax2.set_ylabel(r'$\mathcal{' + METRIC_TXT[metric] + '}$')
    ax2.yaxis.grid(True)
    save_dir = os.path.split(fname)[0]+'/summary_graph_{:.3f}.png'.format(metric_res[-1])
    plt.savefig(save_dir)
if __name__ == '__main__':
    # Example: plot the summary curves for a local evaluation report.
    analyze_summary('/home/yuk/Desktop/IPNet_summary_davis17_val.json', 'J_AND_F')
IVOS-ATNet | IVOS-ATNet-master/libs/davis2017_torchdataset.py | from __future__ import division
import os
import numpy as np
import cv2
from libs import utils
from torch.utils.data import Dataset
import json
from PIL import Image
class DAVIS2017(Dataset):
"""DAVIS 2017 dataset constructed using the PyTorch built-in functionalities"""
def __init__(self,
split='val',
root='',
num_frames=None,
custom_frames=None,
transform=None,
retname=False,
seq_name=None,
obj_id=None,
gt_only_first_frame=False,
no_gt=False,
batch_gt=False,
rgb=False,
effective_batch=None,
prev_round_masks = None,#f,h,w
):
"""Loads image to label pairs for tool pose estimation
split: Split or list of splits of the dataset
root: dataset directory with subfolders "JPEGImages" and "Annotations"
num_frames: Select number of frames of the sequence (None for all frames)
custom_frames: List or Tuple with the number of the frames to include
transform: Data transformations
retname: Retrieve meta data in the sample key 'meta'
seq_name: Use a specific sequence
obj_id: Use a specific object of a sequence (If None and sequence is specified, the batch_gt is True)
gt_only_first_frame: Provide the GT only in the first frame
no_gt: No GT is provided
batch_gt: For every frame sequence batch all the different objects gt
rgb: Use RGB channel order in the image
"""
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.db_root_dir = root
self.transform = transform
self.seq_name = seq_name
self.obj_id = obj_id
self.num_frames = num_frames
self.custom_frames = custom_frames
self.retname = retname
self.rgb = rgb
if seq_name is not None and obj_id is None:
batch_gt = True
self.batch_gt = batch_gt
self.all_seqs_list = []
self.seqs = []
for splt in self.split:
with open(os.path.join(self.db_root_dir, 'ImageSets', '2017', splt + '.txt')) as f:
seqs_tmp = f.readlines()
seqs_tmp = list(map(lambda elem: elem.strip(), seqs_tmp))
self.seqs.extend(seqs_tmp)
self.seq_list_file = os.path.join(self.db_root_dir, 'ImageSets', '2017',
'_'.join(self.split) + '_instances.txt')
# Precompute the dictionary with the objects per sequence
if not self._check_preprocess():
self._preprocess()
if self.seq_name is None:
img_list = []
labels = []
prevmask_list= []
for seq in self.seqs:
images = np.sort(os.listdir(os.path.join(self.db_root_dir, 'JPEGImages/480p/', seq.strip())))
images_path = list(map(lambda x: os.path.join('JPEGImages/480p/', seq.strip(), x), images))
lab = np.sort(os.listdir(os.path.join(self.db_root_dir, 'Annotations/480p/', seq.strip())))
lab_path = list(map(lambda x: os.path.join('Annotations/480p/', seq.strip(), x), lab))
if num_frames is not None:
seq_len = len(images_path)
num_frames = min(num_frames, seq_len)
frame_vector = np.arange(num_frames)
frames_ids = list(np.round(frame_vector*seq_len/float(num_frames)).astype(np.int))
frames_ids[-1] = min(frames_ids[-1], seq_len)
images_path = [images_path[x] for x in frames_ids]
if no_gt:
lab_path = [None] * len(images_path)
else:
lab_path = [lab_path[x] for x in frames_ids]
elif isinstance(custom_frames, tuple) or isinstance(custom_frames, list):
assert min(custom_frames) >= 0 and max(custom_frames) <= len(images_path)
images_path = [images_path[x] for x in custom_frames]
prevmask_list = [prev_round_masks[x] for x in custom_frames]
if no_gt:
lab_path = [None] * len(images_path)
else:
lab_path = [lab_path[x] for x in custom_frames]
if gt_only_first_frame:
lab_path = [lab_path[0]]
lab_path.extend([None] * (len(images_path) - 1))
elif no_gt:
lab_path = [None] * len(images_path)
if self.batch_gt:
obj = self.seq_dict[seq]
if -1 in obj:
obj.remove(-1)
for ii in range(len(img_list), len(images_path)+len(img_list)):
self.all_seqs_list.append([obj, ii])
else:
for obj in self.seq_dict[seq]:
if obj != -1:
for ii in range(len(img_list), len(images_path)+len(img_list)):
self.all_seqs_list.append([obj, ii])
img_list.extend(images_path)
labels.extend(lab_path)
else:
# Initialize the per sequence images for online training
assert self.seq_name in self.seq_dict.keys(), '{} not in {} set.'.format(self.seq_name, '_'.join(self.split))
names_img = np.sort(os.listdir(os.path.join(self.db_root_dir, 'JPEGImages/480p/', str(seq_name))))
img_list = list(map(lambda x: os.path.join('JPEGImages/480p/', str(seq_name), x), names_img))
name_label = np.sort(os.listdir(os.path.join(self.db_root_dir, 'Annotations/480p/', str(seq_name))))
labels = list(map(lambda x: os.path.join('Annotations/480p/', str(seq_name), x), name_label))
prevmask_list = []
if num_frames is not None:
seq_len = len(img_list)
num_frames = min(num_frames, seq_len)
frame_vector = np.arange(num_frames)
frames_ids = list(np.round(frame_vector * seq_len / float(num_frames)).astype(np.int))
frames_ids[-1] = min(frames_ids[-1], seq_len)
img_list = [img_list[x] for x in frames_ids]
if no_gt:
labels = [None] * len(img_list)
else:
labels = [labels[x] for x in frames_ids]
elif isinstance(custom_frames, tuple) or isinstance(custom_frames, list):
assert min(custom_frames) >= 0 and max(custom_frames) <= len(img_list)
img_list = [img_list[x] for x in custom_frames]
prevmask_list = [prev_round_masks[x] for x in custom_frames]
if no_gt:
labels = [None] * len(img_list)
else:
labels = [labels[x] for x in custom_frames]
if gt_only_first_frame:
labels = [labels[0]]
labels.extend([None]*(len(img_list)-1))
elif no_gt:
labels = [None] * len(img_list)
if obj_id is not None:
assert obj_id in self.seq_dict[self.seq_name], \
"{} doesn't have this object id {}.".format(self.seq_name, str(obj_id))
if self.batch_gt:
self.obj_id = self.seq_dict[self.seq_name]
if -1 in self.obj_id:
self.obj_id.remove(-1)
self.obj_id = [0]+self.obj_id
assert (len(labels) == len(img_list))
if effective_batch:
self.img_list = img_list * effective_batch
self.labels = labels * effective_batch
else:
self.img_list = img_list
self.labels = labels
self.prevmasks_list = prevmask_list
# print('Done initializing DAVIS2017 '+'_'.join(self.split)+' Dataset')
# print('Number of images: {}'.format(len(self.img_list)))
# if self.seq_name is None:
# print('Number of elements {}'.format(len(self.all_seqs_list)))
def _check_preprocess(self):
_seq_list_file = self.seq_list_file
if not os.path.isfile(_seq_list_file):
return False
else:
self.seq_dict = json.load(open(self.seq_list_file, 'r'))
return True
    def _preprocess(self):
        """Build ``self.seq_dict`` (sequence -> list of object ids) from the
        first annotation frame of each sequence and cache it as JSON at
        ``self.seq_list_file``.
        """
        self.seq_dict = {}
        for seq in self.seqs:
            # Read object masks and get number of objects
            name_label = np.sort(os.listdir(os.path.join(self.db_root_dir, 'Annotations/480p/', seq)))
            label_path = os.path.join(self.db_root_dir, 'Annotations/480p/', seq, name_label[0])
            _mask = np.array(Image.open(label_path))
            _mask_ids = np.unique(_mask)
            # NOTE(review): assumes object ids are contiguous 1..max in the
            # first frame -- confirm against the DAVIS annotation format.
            n_obj = _mask_ids[-1]
            self.seq_dict[seq] = list(range(1, n_obj+1))
        # Hand-written JSON serialization (one "seq": [ids] entry per line).
        with open(self.seq_list_file, 'w') as outfile:
            outfile.write('{{\n\t"{:s}": {:s}'.format(self.seqs[0], json.dumps(self.seq_dict[self.seqs[0]])))
            for ii in range(1, len(self.seqs)):
                outfile.write(',\n\t"{:s}": {:s}'.format(self.seqs[ii], json.dumps(self.seq_dict[self.seqs[ii]])))
            outfile.write('\n}\n')
        print('Preprocessing finished')
def __len__(self):
if self.seq_name is None:
return len(self.all_seqs_list)
else:
return len(self.img_list)
    def __getitem__(self, idx):
        """Return one padded sample dict {'image', 'gt'} (plus 'meta' when
        ``retname`` is set), after applying the optional ``transform``.
        """
        # print(idx)
        # NOTE(review): prev_round_mask is loaded but never used or returned
        # here -- verify whether it should also be padded into the sample.
        img, gt, prev_round_mask = self.make_img_gt_mask_pair(idx)
        # Pad image and GT to a common size; pad_info comes from the image.
        pad_img, pad_info = utils.apply_pad(img)
        pad_gt= utils.apply_pad(gt, padinfo = pad_info)#h,w,n
        sample = {'image': pad_img, 'gt': pad_gt}
        if self.retname:
            # Recover sequence/frame identifiers from the image path.
            if self.seq_name is None:
                obj_id = self.all_seqs_list[idx][0]
                img_path = self.img_list[self.all_seqs_list[idx][1]]
            else:
                obj_id = self.obj_id
                img_path = self.img_list[idx]
            seq_name = img_path.split('/')[-2]
            frame_id = img_path.split('/')[-1].split('.')[-2]
            sample['meta'] = {'seq_name': seq_name,
                              'frame_id': frame_id,
                              'obj_id': obj_id,
                              'im_size': (img.shape[0], img.shape[1]),
                              'pad_size': (pad_img.shape[0], pad_img.shape[1]),
                              'pad_info': pad_info}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample
    def make_img_gt_mask_pair(self, idx):
        """
        Make the image-ground-truth pair
        Returns (img, gt, prev_round_mask). In batch_gt mode gt and
        prev_round_mask are per-object binary maps of shape (H, W, n_obj);
        otherwise a single (H, W) binary map for self.obj_id.
        """
        prev_round_mask_tmp = self.prevmasks_list[idx]
        if self.seq_name is None:
            obj_id = self.all_seqs_list[idx][0]
            img_path = self.img_list[self.all_seqs_list[idx][1]]
            label_path = self.labels[self.all_seqs_list[idx][1]]
        else:
            obj_id = self.obj_id
            img_path = self.img_list[idx]
            label_path = self.labels[idx]
        seq_name = img_path.split('/')[-2]  # NOTE(review): unused local
        n_obj = 1 if isinstance(obj_id, int) else len(obj_id)
        img = cv2.imread(os.path.join(self.db_root_dir, img_path))
        img = np.array(img, dtype=np.float32)
        if self.rgb:
            # cv2 loads BGR; reorder the channels to RGB.
            img = img[:, :, [2, 1, 0]]
        if label_path is not None:
            label = Image.open(os.path.join(self.db_root_dir, label_path))
        else:
            # No annotation: all-zero ground truth of the right shape.
            if self.batch_gt:
                gt = np.zeros(np.append(img.shape[:-1], n_obj), dtype=np.float32)
            else:
                gt = np.zeros(img.shape[:-1], dtype=np.float32)
        if label_path is not None:
            gt_tmp = np.array(label, dtype=np.uint8)
            if self.batch_gt:
                # One binary channel per object id, stacked to (H, W, n_obj).
                gt = np.zeros(np.append(n_obj, gt_tmp.shape), dtype=np.float32)
                for ii, k in enumerate(obj_id):
                    gt[ii, :, :] = gt_tmp == k
                gt = gt.transpose((1, 2, 0))
            else:
                gt = (gt_tmp == obj_id).astype(np.float32)
        if self.batch_gt:
            # Split the previous-round label map into per-object channels.
            prev_round_mask = np.zeros(np.append(img.shape[:-1], n_obj), dtype=np.float32)
            for ii, k in enumerate(obj_id):
                prev_round_mask[:, :, ii] = prev_round_mask_tmp == k
        else:
            prev_round_mask = (prev_round_mask_tmp == obj_id).astype(np.float32)
        return img, gt, prev_round_mask
def get_img_size(self):
img = cv2.imread(os.path.join(self.db_root_dir, self.img_list[0]))
return list(img.shape[:2])
def __str__(self):
return 'DAVIS2017'
if __name__ =='__main__':
    # Smoke test: load two frames of the 'gold-fish' sequence with all-zero
    # previous-round masks. NOTE(review): db root defaults to '' here, so this
    # only works when run from a directory containing the DAVIS layout.
    a = DAVIS2017(split='val', custom_frames=[21,22], seq_name='gold-fish', rgb=True, no_gt=False, retname=True,prev_round_masks=np.zeros([40,480,854]))
    c= a.__getitem__(0)
    b=1
b=1 | 13,111 | 42.417219 | 153 | py |
GraphLoG | GraphLoG-main/pretrain_graphlog.py | import argparse
from loader import MoleculeDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
import os, sys
import pdb
import copy
import random
from model import GNN, ProjectNet
from sklearn.metrics import roc_auc_score
from splitters import scaffold_split, random_split, random_scaffold_split
import pandas as pd
from util import ExtractSubstructureContextPair
from torch_geometric.data import DataLoader
from dataloader import DataLoaderSubstructContext
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool
from tensorboardX import SummaryWriter
# Graph pooling functions
def pool_func(x, batch, mode = "mean"):
    """Pool node embeddings into per-graph embeddings.

    Args:
        x: node feature matrix (one row per node).
        batch: node-to-graph assignment vector.
        mode: one of "sum", "mean" or "max".

    Returns:
        Graph-level feature matrix (one row per graph).

    Raises:
        ValueError: if ``mode`` is not a supported pooling mode (the
        original silently returned ``None`` in that case).
    """
    if mode == "sum":
        return global_add_pool(x, batch)
    if mode == "mean":
        return global_mean_pool(x, batch)
    if mode == "max":
        return global_max_pool(x, batch)
    raise ValueError("unknown pooling mode: {!r}".format(mode))
# Mask some nodes in a graph
def mask_nodes(batch, args, num_atom_type=119):
    """Randomly mask node features of every graph in the batch (in place).

    For each graph a subset of its nodes is sampled -- size is
    ``num_node * args.mask_rate + 1`` unless ``args.mask_num`` is nonzero,
    in which case ``min(args.mask_num, num_node // 2)`` -- and their feature
    rows are overwritten with the mask token ``[num_atom_type, 0]``.  The
    chosen indices are stored on ``batch.masked_node_indices``.
    """
    chosen = list()
    # batch.batch[-1] + 1 == number of graphs in the batch
    for graph_id in range(batch.batch[-1] + 1):
        node_ids = torch.nonzero((batch.batch == graph_id).float()).squeeze(-1)
        graph_size = node_ids.shape[0]
        if args.mask_num == 0:
            n_sample = int(graph_size * args.mask_rate + 1)
        else:
            n_sample = min(args.mask_num, int(graph_size * 0.5))
        picked = random.sample(node_ids.tolist(), n_sample)
        picked.sort()
        chosen += picked
    batch.masked_node_indices = torch.tensor(chosen)
    # Overwrite the sampled rows with the mask token.
    for node_idx in chosen:
        batch.x[node_idx] = torch.tensor([num_atom_type, 0])
    return batch
# NCE loss within a graph
def intra_NCE_loss(node_reps, node_modify_reps, batch, tau=0.1, epsilon=1e-6):
    """InfoNCE loss between original and masked node embeddings within graphs.

    Candidate pairs are restricted to nodes of the same graph via a
    membership mask; positives are the masked-node rows/columns.  Returns
    the loss and the count of masked nodes whose positive ratio beats the
    uniform-chance threshold.
    """
    norm_a = node_reps.norm(dim=1, keepdim=True)
    norm_b = node_modify_reps.norm(dim=1, keepdim=True)
    cosine = node_reps.mm(node_modify_reps.t()) / (norm_a.mm(norm_b.t()) + epsilon)
    exp_sim = torch.exp(cosine / tau)
    # membership[i, j] = 1 iff nodes i and j belong to the same graph
    membership = torch.stack(
        [(batch.batch == g).float() for g in batch.batch.tolist()], dim=1)
    exp_sim = exp_sim * membership
    candidates = torch.index_select(exp_sim, 1, batch.masked_node_indices)
    positives = torch.index_select(candidates, 0, batch.masked_node_indices)
    ratio = positives.sum(0) / (candidates.sum(0) + epsilon)
    loss = -torch.log(ratio).sum() / batch.masked_node_indices.shape[0]
    # Chance level: 1 / (#same-graph candidates) per masked node.
    chance = 1. / torch.index_select(membership, 1, batch.masked_node_indices).sum(0)
    correct = (ratio > chance).float().sum()
    return loss, correct
# NCE loss across different graphs
def inter_NCE_loss(graph_reps, graph_modify_reps, device, tau=0.1, epsilon=1e-6):
    """InfoNCE loss between each graph and its masked counterpart.

    The i-th original / i-th modified graph form the positive pair; every
    other modified graph in the batch is a negative.  Returns the loss and
    the number of graphs whose positive ratio beats chance level.
    """
    n_graphs = graph_reps.shape[0]
    norm_a = graph_reps.norm(dim=1, keepdim=True)
    norm_b = graph_modify_reps.norm(dim=1, keepdim=True)
    cosine = graph_reps.mm(graph_modify_reps.t()) / (norm_a.mm(norm_b.t()) + epsilon)
    exp_sim = torch.exp(cosine / tau)
    # Positives on the diagonal, negatives everywhere else.
    identity = torch.eye(n_graphs).to(device)
    pos = (exp_sim * identity).sum(0)
    neg = (exp_sim * (1 - identity)).sum(0)
    ratio = pos / (pos + neg + epsilon)
    loss = -torch.log(ratio).sum() / n_graphs
    chance = 1. / ((1 - identity).sum(0) + 1.)
    correct = (ratio > chance).float().sum()
    return loss, correct
# NCE loss for global-local mutual information maximization
def gl_NCE_loss(node_reps, graph_reps, batch, tau=0.1, epsilon=1e-6):
    """InfoNCE loss for global-local mutual information maximization.

    A node and the embedding of its own graph form the positive pair; all
    other graph embeddings act as negatives.  ``batch`` is the node-to-graph
    index vector.  The ``+ (1 - mask)`` inside the log turns the masked-out
    (non-matching) entries into log(1) = 0 so they do not contribute.
    """
    n_nodes = node_reps.shape[0]
    n_graphs = graph_reps.shape[0]
    norm_nodes = node_reps.norm(dim=1, keepdim=True)
    norm_graphs = graph_reps.norm(dim=1, keepdim=True)
    cosine = node_reps.mm(graph_reps.t()) / (norm_nodes.mm(norm_graphs.t()) + epsilon)
    exp_sim = torch.exp(cosine / tau)
    # membership[i, g] = 1 iff node i belongs to graph g
    membership = torch.stack(
        [(batch == g).float() for g in range(n_graphs)], dim=1)
    pos = exp_sim * membership
    neg = exp_sim * (1 - membership)
    ratio = pos / (pos + neg.sum(0).unsqueeze(0) + epsilon)
    loss = -torch.log(ratio + (1 - membership)).sum() / n_nodes
    chance = 1. / ((1 - membership).sum(0) + 1.).unsqueeze(0)
    correct = (ratio > chance).float().sum()
    return loss, correct
# NCE loss between graphs and prototypes
def proto_NCE_loss(graph_reps, tau=0.1, epsilon=1e-6):
    """InfoNCE loss between graph embeddings and the prototype hierarchy.

    Uses the module-level globals ``proto`` (list of prototype matrices,
    lowest layer first) and ``proto_connection`` (child->parent index maps).
    The hierarchy is traversed top-down: at each lower layer only prototypes
    connected to the prototype selected at the layer above stay admissible,
    and an extra NCE term ties prototypes of consecutive layers together.
    """
    global proto, proto_connection
    # similarity for original and modified graphs
    graph_reps_norm = torch.norm(graph_reps, dim=1).unsqueeze(-1)
    exp_sim_list = []
    mask_list = []
    NCE_loss = 0
    # Traverse from the top layer (len(proto)-1) down to the lowest (0).
    for i in range(len(proto)-1, -1, -1):
        tmp_proto = proto[i]
        proto_norm = torch.norm(tmp_proto, dim=1).unsqueeze(-1)
        sim = torch.mm(graph_reps, tmp_proto.t()) / (
                torch.mm(graph_reps_norm, proto_norm.t()) + epsilon)
        exp_sim = torch.exp(sim / tau)
        if i != (len(proto) - 1):
            # apply the connection mask
            exp_sim_last = exp_sim_list[-1]
            idx_last = torch.argmax(exp_sim_last, dim = 1).unsqueeze(-1)
            connection = proto_connection[i]
            connection_mask = (connection.unsqueeze(0) == idx_last.float()).float()
            exp_sim = exp_sim * connection_mask
            # define NCE loss between prototypes from consecutive layers
            upper_proto = proto[i+1]
            upper_proto_norm = torch.norm(upper_proto, dim=1).unsqueeze(-1)
            proto_sim = torch.mm(tmp_proto, upper_proto.t()) / (
                    torch.mm(proto_norm, upper_proto_norm.t()) + epsilon)
            proto_exp_sim = torch.exp(proto_sim / tau)
            proto_positive_list = [proto_exp_sim[j, connection[j].long()] for j in range(proto_exp_sim.shape[0])]
            proto_positive = torch.stack(proto_positive_list, dim=0)
            proto_positive_ratio = proto_positive / (proto_exp_sim.sum(1) + epsilon)
            NCE_loss += -torch.log(proto_positive_ratio).mean()
        # Hard assignment: each graph's closest admissible prototype.
        mask = (exp_sim == exp_sim.max(1)[0].unsqueeze(-1)).float()
        exp_sim_list.append(exp_sim)
        mask_list.append(mask)
    # define NCE loss between graph embedding and prototypes
    for i in range(len(proto)):
        exp_sim = exp_sim_list[i]
        mask = mask_list[i]
        positive = exp_sim * mask
        negative = exp_sim * (1 - mask)
        positive_ratio = positive.sum(1) / (positive.sum(1) + negative.sum(1) + epsilon)
        NCE_loss += -torch.log(positive_ratio).mean()
    return NCE_loss
# Update prototypes with batch information
def update_proto_lowest(graph_reps, decay_ratio=0.7, epsilon=1e-6):
    """Moving-average update of the lowest-layer prototypes from one batch.

    Each graph embedding is hard-assigned to its most similar prototype
    (cosine similarity); assigned prototypes are pulled toward the batch
    mean of their members with decay ``decay_ratio``.  ``proto_state[0]``
    accumulates per-prototype assignment counts.  Uses module-level globals.
    """
    global proto, proto_state
    graph_reps_norm = torch.norm(graph_reps, dim=1).unsqueeze(-1)
    proto_norm = torch.norm(proto[0], dim=1).unsqueeze(-1)
    sim = torch.mm(graph_reps, proto[0].t()) / (
            torch.mm(graph_reps_norm, proto_norm.t()) + epsilon)
    # update states of prototypes
    mask = (sim == sim.max(1)[0].unsqueeze(-1)).float()
    cnt = mask.sum(0)
    proto_state[0].data = proto_state[0].data + cnt.data
    # update prototypes (only those with at least one assignment move)
    batch_cnt = mask.t() / (cnt.unsqueeze(-1) + epsilon)
    batch_mean = torch.mm(batch_cnt, graph_reps)
    proto[0].data = proto[0].data * (cnt == 0).float().unsqueeze(-1).data + (
            proto[0].data * decay_ratio + batch_mean.data * (1 - decay_ratio)) * (cnt != 0).float().unsqueeze(-1).data
    return
# Initialze prototypes and their state
def init_proto_lowest(args, model, proj, loader, device, num_iter = 5):
    """Initialize the lowest-layer prototypes from the pretrained encoder.

    Runs ``num_iter`` passes over the data, feeding projected graph
    embeddings into the moving-average prototype update, then returns only
    the prototypes that were assigned at least twice (requires_grad=True).
    """
    model.eval()
    proj.eval()
    for iter in range(num_iter):
        for step, batch in enumerate(tqdm(loader, desc="Iteration")):
            batch = batch.to(device)
            # get node and graph representations
            node_reps = model(batch.x, batch.edge_index, batch.edge_attr)
            graph_reps = pool_func(node_reps, batch.batch, mode=args.graph_pooling)
            # feature projection
            graph_reps_proj = proj(graph_reps)
            # update prototypes
            update_proto_lowest(graph_reps_proj, decay_ratio = args.decay_ratio)
    global proto, proto_state
    # Keep only prototypes hit at least twice across all passes.
    idx = torch.nonzero((proto_state[0] >= 2).float()).squeeze(-1)
    proto_selected = torch.index_select(proto[0], 0, idx)
    proto_selected.requires_grad = True
    return proto_selected
# Initialze prototypes and their state
def init_proto(args, index, device, num_iter = 20):
    """Initialize layer-``index`` prototypes from the layer below.

    For ``num_iter`` rounds, every lower-layer prototype pulls its closest
    prototype at this layer toward itself (moving average) and pushes the
    runner-up away.  Afterwards only prototypes that attracted at least one
    child are kept, and the child->parent assignment (remapped onto the
    compacted indices) is returned as the connection vector.
    """
    global proto, proto_state
    proto_connection = torch.zeros(proto[index-1].shape[0]).to(device)
    for iter in range(num_iter):
        for i in range(proto[index-1].shape[0]):
            # update the closest prototype
            sim = torch.mm(proto[index], proto[index-1][i,:].unsqueeze(-1)).squeeze(-1)
            idx = torch.argmax(sim)
            # Record the final assignment only on the last round.
            if iter == (num_iter - 1):
                proto_state[index][idx] = 1
                proto_connection[i] = idx
            proto[index].data[idx, :] = proto[index].data[idx, :] * args.decay_ratio + \
                                        proto[index-1].data[i, :] * (1 - args.decay_ratio)
            # penalize rival
            sim[idx] = 0
            rival_idx = torch.argmax(sim)
            proto[index].data[rival_idx, :] = proto[index].data[rival_idx, :] * (2 - args.decay_ratio) - \
                                              proto[index-1].data[i, :] * (1 - args.decay_ratio)
    indices = torch.nonzero(proto_state[index]).squeeze(-1)
    proto_selected = torch.index_select(proto[index], 0, indices)
    proto_selected.requires_grad = True
    # Remap connection targets onto the compacted prototype indices.
    for i in range(indices.shape[0]):
        idx = indices[i]
        idx_connection = torch.nonzero((proto_connection == idx.float()).float()).squeeze(-1)
        proto_connection[idx_connection] = i
    return proto_selected, proto_connection
# For one epoch pretraining
def pretrain(args, model, proj, loader, optimizer, device):
    """Run one epoch of local-only pre-training (intra- + inter-graph NCE).

    Returns:
        (mean intra loss, intra accuracy, mean inter loss, inter accuracy)
        accumulated over the epoch.
    """
    model.train()
    proj.train()
    NCE_loss_intra_cnt = 0
    NCE_loss_inter_cnt = 0
    correct_intra_cnt = 0
    correct_inter_cnt = 0
    total_intra_cnt = 0
    total_inter_cnt = 0
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        # Build a node-masked copy of the batch as the contrastive view.
        batch_modify = copy.deepcopy(batch)
        batch_modify = mask_nodes(batch_modify, args)
        batch, batch_modify = batch.to(device), batch_modify.to(device)
        # get node and graph representations
        node_reps = model(batch.x, batch.edge_index, batch.edge_attr)
        node_modify_reps = model(batch_modify.x, batch_modify.edge_index, batch_modify.edge_attr)
        graph_reps = pool_func(node_reps, batch.batch, mode=args.graph_pooling)
        graph_modify_reps = pool_func(node_modify_reps, batch_modify.batch, mode=args.graph_pooling)
        # feature projection
        node_reps_proj = proj(node_reps)
        node_modify_reps_proj = proj(node_modify_reps)
        graph_reps_proj = proj(graph_reps)
        graph_modify_reps_proj = proj(graph_modify_reps)
        # NCE loss
        NCE_loss_intra, correct_intra = intra_NCE_loss(node_reps_proj, node_modify_reps_proj,
                                                       batch_modify, tau=args.tau)
        NCE_loss_inter, correct_inter = inter_NCE_loss(graph_reps_proj, graph_modify_reps_proj,
                                                       device, tau=args.tau)
        NCE_loss_intra_cnt += NCE_loss_intra.item()
        NCE_loss_inter_cnt += NCE_loss_inter.item()
        correct_intra_cnt += correct_intra
        correct_inter_cnt += correct_inter
        total_intra_cnt += batch_modify.masked_node_indices.shape[0]
        total_inter_cnt += graph_reps.shape[0]
        # optimization
        optimizer.zero_grad()
        NCE_loss = args.alpha * NCE_loss_intra + args.beta * NCE_loss_inter
        NCE_loss.backward()
        optimizer.step()
        if (step + 1) % args.disp_interval == 0:
            print(
                'iteration: %d, intra NCE loss: %f, intra acc: %f, inter NCE loss: %f, inter acc: %f' % (
                    step + 1, NCE_loss_intra.item(), float(correct_intra_cnt) / float(total_intra_cnt),
                    NCE_loss_inter.item(), float(correct_inter_cnt) / float(total_inter_cnt)))
    # NOTE(review): epoch means divide by `step` (the last index) rather than
    # `step + 1` (the batch count) -- confirm whether this is intended.
    return NCE_loss_intra_cnt / step, float(correct_intra_cnt) / float(
        total_intra_cnt), NCE_loss_inter_cnt / step, float(correct_inter_cnt) / float(total_inter_cnt)
# For every epoch training
def train(args, model, proj, loader, optimizer, device):
    """Run one epoch of joint training: intra-/inter-graph NCE plus the
    global prototype NCE objective (uses the module-level prototype state).

    Returns:
        (mean intra loss, intra acc, mean inter loss, inter acc,
        mean prototype loss) accumulated over the epoch.
    """
    global proto, proto_connection
    model.train()
    proj.train()
    NCE_loss_intra_cnt = 0
    NCE_loss_inter_cnt = 0
    NCE_loss_proto_cnt = 0
    correct_intra_cnt = 0
    correct_inter_cnt = 0
    total_intra_cnt = 0
    total_inter_cnt = 0
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        # Build a node-masked copy of the batch as the contrastive view.
        batch_modify = copy.deepcopy(batch)
        batch_modify = mask_nodes(batch_modify, args)
        batch, batch_modify = batch.to(device), batch_modify.to(device)
        # get node and graph representations
        node_reps = model(batch.x, batch.edge_index, batch.edge_attr)
        node_modify_reps = model(batch_modify.x, batch_modify.edge_index, batch_modify.edge_attr)
        graph_reps = pool_func(node_reps, batch.batch, mode=args.graph_pooling)
        graph_modify_reps = pool_func(node_modify_reps, batch_modify.batch, mode=args.graph_pooling)
        # feature projection
        node_reps_proj = proj(node_reps)
        node_modify_reps_proj = proj(node_modify_reps)
        graph_reps_proj = proj(graph_reps)
        graph_modify_reps_proj = proj(graph_modify_reps)
        # NCE loss
        NCE_loss_intra, correct_intra = intra_NCE_loss(node_reps_proj, node_modify_reps_proj,
                                                       batch_modify, tau=args.tau)
        NCE_loss_inter, correct_inter = inter_NCE_loss(graph_reps_proj, graph_modify_reps_proj,
                                                       device, tau=args.tau)
        NCE_loss_proto = proto_NCE_loss(graph_reps_proj, tau=args.tau)
        NCE_loss_intra_cnt += NCE_loss_intra.item()
        NCE_loss_inter_cnt += NCE_loss_inter.item()
        NCE_loss_proto_cnt += NCE_loss_proto.item()
        correct_intra_cnt += correct_intra
        correct_inter_cnt += correct_inter
        total_intra_cnt += batch_modify.masked_node_indices.shape[0]
        total_inter_cnt += graph_reps.shape[0]
        # optimization
        optimizer.zero_grad()
        NCE_loss = args.alpha * NCE_loss_intra + args.beta * NCE_loss_inter + \
                   args.gamma * NCE_loss_proto
        NCE_loss.backward()
        optimizer.step()
        if (step + 1) % args.disp_interval == 0:
            print(
                'iteration: %d, intra NCE loss: %f, intra acc: %f, inter NCE loss: %f, inter acc: %f' % (
                    step + 1, NCE_loss_intra.item(), float(correct_intra_cnt) / float(total_intra_cnt),
                    NCE_loss_inter.item(), float(correct_inter_cnt) / float(total_inter_cnt)))
            template = 'iteration: %d, proto NCE loss: %f'
            value_list = [step + 1, NCE_loss_proto.item()]
            for i in range(args.hierarchy):
                template += (', active num ' + str(i+1) + ': %d')
                value_list.append(proto[i].shape[0])
            print (template % tuple(value_list))
    # NOTE(review): epoch means divide by `step` (the last index) rather than
    # `step + 1` (the batch count) -- confirm whether this is intended.
    return NCE_loss_intra_cnt / step, float(correct_intra_cnt) / float(
        total_intra_cnt), NCE_loss_inter_cnt / step, float(correct_inter_cnt) / float(
        total_inter_cnt), NCE_loss_proto_cnt / step
def main():
    """GraphLoG pre-training entry point: local-objective warm-up,
    prototype initialization, then joint local + global training."""
    # Training settings
    parser = argparse.ArgumentParser(description='GraphLoG for GNN pre-training')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=512,
                        help='input batch size for training (default: 512)')
    parser.add_argument('--local_epochs', type=int, default=1,
                        help='number of epochs for local learning (default: 1)')
    parser.add_argument('--global_epochs', type=int, default=10,
                        help='number of epochs for global learning (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='learning rate (default: 0.001)')
    parser.add_argument('--decay', type=float, default=0,
                        help='weight decay (default: 0)')
    parser.add_argument('--num_layer', type=int, default=5,
                        help='number of GNN message passing layers (default: 5).')
    parser.add_argument('--emb_dim', type=int, default=300,
                        help='embedding dimensions (default: 300)')
    parser.add_argument('--dropout_ratio', type=float, default=0,
                        help='dropout ratio (default: 0)')
    parser.add_argument('--mask_rate', type=float, default=0.3,
                        help='dropout ratio (default: 0.3)')
    parser.add_argument('--mask_num', type=int, default=0,
                        help='the number of modified nodes (default: 0)')
    parser.add_argument('--JK', type=str, default="last",
                        help='how the node features are combined across layers. last, sum, max or concat')
    parser.add_argument('--graph_pooling', type=str, default="mean",
                        help='graph level pooling (sum, mean, max)')
    parser.add_argument('--dataset', type=str, default='zinc_standard_agent',
                        help='root directory of dataset for pretraining')
    parser.add_argument('--output_model_file', type=str, default='', help='filename to output the model')
    parser.add_argument('--gnn_type', type=str, default="gin")
    parser.add_argument('--seed', type=int, default=0, help="Seed for splitting dataset.")
    parser.add_argument('--num_workers', type=int, default=1, help='number of workers for dataset loading')
    parser.add_argument('--tau', type=float, default=0.04, help='the temperature parameter for softmax')
    parser.add_argument('--decay_ratio', type=float, default=0.95, help='the decay ratio for moving average')
    parser.add_argument('--num_proto', type=int, default=50, help='the number of initial prototypes')
    parser.add_argument('--hierarchy', type=int, default=3, help='the number of hierarchy')
    parser.add_argument('--alpha', type=float, default=1, help='the weight of intra-graph NCE loss')
    parser.add_argument('--beta', type=float, default=1, help='the weight of inter-graph NCE loss')
    parser.add_argument('--gamma', type=float, default=0.1, help='the weight of prototype NCE loss')
    parser.add_argument('--disp_interval', type=int, default=10, help='the display interval')
    args = parser.parse_args()
    # Seed all RNGs for reproducibility.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    print("num GNN layer: %d" % (args.num_layer))
    # set up dataset and transform function.
    dataset = MoleculeDataset("./dataset/" + args.dataset, dataset=args.dataset)
    loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    # set up pretraining models and feature projector
    model = GNN(args.num_layer, args.emb_dim, JK=args.JK, drop_ratio=args.dropout_ratio,
                gnn_type=args.gnn_type).to(device)
    if args.JK == 'concat':
        proj = ProjectNet((args.num_layer + 1) * args.emb_dim).to(device)
    else:
        proj = ProjectNet(args.emb_dim).to(device)
    # set up the optimizer for pretraining
    model_param_group = [{"params": model.parameters(), "lr": args.lr},
                         {"params": proj.parameters(), "lr": args.lr}]
    optimizer_pretrain = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
    # initialize prototypes and their state
    global proto, proto_state, proto_connection
    if args.JK == 'concat':
        proto = [torch.rand((args.num_proto, (args.num_layer + 1) * args.emb_dim)).to(device) for i in
                 range(args.hierarchy)]
    else:
        proto = [torch.rand((args.num_proto, args.emb_dim)).to(device) for i in range(args.hierarchy)]
    proto_state = [torch.zeros(args.num_proto).to(device) for i in range(args.hierarchy)]
    proto_connection = []
    # pre-training with only local objective
    for epoch in range(1, args.local_epochs + 1):
        print("====epoch " + str(epoch))
        train_intra_loss, train_intra_acc, train_inter_loss, train_inter_acc = pretrain(
            args, model, proj, loader, optimizer_pretrain, device)
        print(train_intra_loss, train_intra_acc, train_inter_loss, train_inter_acc)
        print("")
    # initialize prototypes and their state according to pretrained representations
    print("Initalize prototypes: layer 1")
    tmp_proto = init_proto_lowest(args, model, proj, loader, device)
    proto[0] = tmp_proto
    for i in range(1, args.hierarchy):
        print ("Initialize prototypes: layer ", i + 1)
        tmp_proto, tmp_proto_connection = init_proto(args, i, device)
        proto[i] = tmp_proto
        proto_connection.append(tmp_proto_connection)
    # set up the optimizer (prototypes are trained jointly, without weight decay)
    model_param_group = [{"params": model.parameters(), "lr": args.lr},
                         {"params": proj.parameters(), "lr": args.lr}]
    for i in range(args.hierarchy):
        model_param_group += [{'params': proto[i], 'lr': args.lr, 'weight_decay': 0}]
    optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
    # Training with local and global objectives
    for epoch in range(1, args.global_epochs + 1):
        print("====epoch " + str(epoch))
        train_intra_loss, train_intra_acc, train_inter_loss, train_inter_acc, train_proto_loss = train(
            args, model, proj, loader, optimizer, device)
        print(train_intra_loss, train_intra_acc, train_inter_loss, train_inter_acc, train_proto_loss)
    if not args.output_model_file == "":
        torch.save(model.state_dict(), args.output_model_file + ".pth")
    # Removed `os.system('watch nvidia-smi')`: `watch` never exits, so the
    # process would hang here forever after training completed.
if __name__ == "__main__":
main() | 22,536 | 43.364173 | 118 | py |
GraphLoG | GraphLoG-main/batch.py | import torch
from torch_geometric.data import Data, Batch
class BatchMasking(Data):
    r"""A plain old python object modeling a batch of graphs as one big
    (dicconnected) graph. With :class:`torch_geometric.data.Data` being the
    base class, all its methods can also be used here.
    In addition, single graphs can be reconstructed via the assignment vector
    :obj:`batch`, which maps each node to its respective graph identifier.
    Masking-specific attributes (``masked_atom_indices``,
    ``connected_edge_indices``) are offset so they stay valid in the
    concatenated batch graph.
    """
    def __init__(self, batch=None, **kwargs):
        super(BatchMasking, self).__init__(**kwargs)
        self.batch = batch
    @staticmethod
    def from_data_list(data_list):
        r"""Constructs a batch object from a python list holding
        :class:`torch_geometric.data.Data` objects.
        The assignment vector :obj:`batch` is created on the fly."""
        keys = [set(data.keys) for data in data_list]
        keys = list(set.union(*keys))
        assert 'batch' not in keys
        batch = BatchMasking()
        for key in keys:
            batch[key] = []
        batch.batch = []
        # Running offsets keep per-graph node/edge indices valid once the
        # graphs are concatenated into one disconnected batch graph.
        cumsum_node = 0
        cumsum_edge = 0
        for i, data in enumerate(data_list):
            num_nodes = data.num_nodes
            batch.batch.append(torch.full((num_nodes, ), i, dtype=torch.long))
            for key in data.keys:
                item = data[key]
                if key in ['edge_index', 'masked_atom_indices']:
                    item = item + cumsum_node
                elif key == 'connected_edge_indices':
                    item = item + cumsum_edge
                batch[key].append(item)
            cumsum_node += num_nodes
            cumsum_edge += data.edge_index.shape[1]
        for key in keys:
            batch[key] = torch.cat(
                batch[key], dim=data_list[0].cat_dim(key, batch[key][0]))
        batch.batch = torch.cat(batch.batch, dim=-1)
        return batch.contiguous()
    def cumsum(self, key, item):
        r"""If :obj:`True`, the attribute :obj:`key` with content :obj:`item`
        should be added up cumulatively before concatenated together.
        .. note::
            This method is for internal use only, and should only be overridden
            if the batch concatenation process is corrupted for a specific data
            attribute.
        """
        return key in ['edge_index', 'face', 'masked_atom_indices', 'connected_edge_indices']
    @property
    def num_graphs(self):
        """Returns the number of graphs in the batch."""
        return self.batch[-1].item() + 1
class BatchAE(Data):
    r"""A plain old python object modeling a batch of graphs as one big
    (dicconnected) graph. With :class:`torch_geometric.data.Data` being the
    base class, all its methods can also be used here.
    In addition, single graphs can be reconstructed via the assignment vector
    :obj:`batch`, which maps each node to its respective graph identifier.
    Autoencoder variant: also offsets ``negative_edge_index`` by the running
    node count.
    """
    def __init__(self, batch=None, **kwargs):
        super(BatchAE, self).__init__(**kwargs)
        self.batch = batch
    @staticmethod
    def from_data_list(data_list):
        r"""Constructs a batch object from a python list holding
        :class:`torch_geometric.data.Data` objects.
        The assignment vector :obj:`batch` is created on the fly."""
        keys = [set(data.keys) for data in data_list]
        keys = list(set.union(*keys))
        assert 'batch' not in keys
        batch = BatchAE()
        for key in keys:
            batch[key] = []
        batch.batch = []
        # Running node offset keeps edge indices valid after concatenation.
        cumsum_node = 0
        for i, data in enumerate(data_list):
            num_nodes = data.num_nodes
            batch.batch.append(torch.full((num_nodes, ), i, dtype=torch.long))
            for key in data.keys:
                item = data[key]
                if key in ['edge_index', 'negative_edge_index']:
                    item = item + cumsum_node
                batch[key].append(item)
            cumsum_node += num_nodes
        for key in keys:
            batch[key] = torch.cat(
                batch[key], dim=batch.cat_dim(key))
        batch.batch = torch.cat(batch.batch, dim=-1)
        return batch.contiguous()
    @property
    def num_graphs(self):
        """Returns the number of graphs in the batch."""
        return self.batch[-1].item() + 1
    def cat_dim(self, key):
        # Edge-index tensors are (2, E) and concatenate along the last dim.
        return -1 if key in ["edge_index", "negative_edge_index"] else 0
class BatchSubstructContext(Data):
    r"""A plain old python object modeling a batch of graphs as one big
    (disconnected) graph. With :class:`torch_geometric.data.Data` being the
    base class, all its methods can also be used here.
    In addition, single graphs can be reconstructed via the assignment vector
    :obj:`batch`, which maps each node to its respective graph identifier.
    """
    """
    Specialized batching for substructure context pair!
    """
    def __init__(self, batch=None, **kwargs):
        super(BatchSubstructContext, self).__init__(**kwargs)
        self.batch = batch
    @staticmethod
    def from_data_list(data_list):
        r"""Constructs a batch object from a python list holding
        :class:`torch_geometric.data.Data` objects.
        The assignment vector :obj:`batch` is created on the fly."""
        #keys = [set(data.keys) for data in data_list]
        #keys = list(set.union(*keys))
        #assert 'batch' not in keys
        batch = BatchSubstructContext()
        # Fixed attribute set: substructure graph + context graph tensors.
        keys = ["center_substruct_idx", "edge_attr_substruct", "edge_index_substruct", "x_substruct", "overlap_context_substruct_idx", "edge_attr_context", "edge_index_context", "x_context"]
        for key in keys:
            #print(key)
            batch[key] = []
        #batch.batch = []
        #used for pooling the context
        batch.batch_overlapped_context = []
        batch.overlapped_context_size = []
        # Separate node-offset counters: main graph, substructure graph and
        # context graph are merged independently.
        cumsum_main = 0
        cumsum_substruct = 0
        cumsum_context = 0
        i = 0
        for data in data_list:
            #If there is no context, just skip!!
            if hasattr(data, "x_context"):
                num_nodes = data.num_nodes
                num_nodes_substruct = len(data.x_substruct)
                num_nodes_context = len(data.x_context)
                #batch.batch.append(torch.full((num_nodes, ), i, dtype=torch.long))
                # Assignment vector over the overlap nodes only (used to pool
                # the context representation per graph).
                batch.batch_overlapped_context.append(torch.full((len(data.overlap_context_substruct_idx), ), i, dtype=torch.long))
                batch.overlapped_context_size.append(len(data.overlap_context_substruct_idx))
                ###batching for the main graph
                #for key in data.keys:
                #    if not "context" in key and not "substruct" in key:
                #        item = data[key]
                #        item = item + cumsum_main if batch.cumsum(key, item) else item
                #        batch[key].append(item)
                ###batching for the substructure graph
                for key in ["center_substruct_idx", "edge_attr_substruct", "edge_index_substruct", "x_substruct"]:
                    item = data[key]
                    # Index-like attributes are shifted by the substruct offset.
                    item = item + cumsum_substruct if batch.cumsum(key, item) else item
                    batch[key].append(item)
                ###batching for the context graph
                for key in ["overlap_context_substruct_idx", "edge_attr_context", "edge_index_context", "x_context"]:
                    item = data[key]
                    item = item + cumsum_context if batch.cumsum(key, item) else item
                    batch[key].append(item)
                cumsum_main += num_nodes
                cumsum_substruct += num_nodes_substruct
                cumsum_context += num_nodes_context
                i += 1
        for key in keys:
            batch[key] = torch.cat(
                batch[key], dim=batch.cat_dim(key))
        #batch.batch = torch.cat(batch.batch, dim=-1)
        batch.batch_overlapped_context = torch.cat(batch.batch_overlapped_context, dim=-1)
        batch.overlapped_context_size = torch.LongTensor(batch.overlapped_context_size)
        return batch.contiguous()
    def cat_dim(self, key):
        # Edge-index tensors are (2, E): concatenate along the last dimension.
        return -1 if key in ["edge_index", "edge_index_substruct", "edge_index_context"] else 0
    def cumsum(self, key, item):
        r"""If :obj:`True`, the attribute :obj:`key` with content :obj:`item`
        should be added up cumulatively before concatenated together.
        .. note::
            This method is for internal use only, and should only be overridden
            if the batch concatenation process is corrupted for a specific data
            attribute.
        """
        return key in ["edge_index", "edge_index_substruct", "edge_index_context", "overlap_context_substruct_idx", "center_substruct_idx"]
    @property
    def num_graphs(self):
        """Returns the number of graphs in the batch."""
        # NOTE(review): relies on self.batch, which from_data_list never
        # populates for this class (the batch-vector code above is commented
        # out) — confirm this property is unused for substruct-context batches.
        return self.batch[-1].item() + 1
| 8,940 | 38.043668 | 190 | py |
GraphLoG | GraphLoG-main/dataloader.py | import torch.utils.data
from torch.utils.data.dataloader import default_collate
from batch import BatchSubstructContext, BatchMasking, BatchAE
class DataLoaderSubstructContext(torch.utils.data.DataLoader):
    r"""Data loader which merges data objects from a
    :class:`torch_geometric.data.dataset` into a
    :class:`BatchSubstructContext` mini-batch.
    Args:
        dataset (Dataset): The dataset from which to load the data.
        batch_size (int, optional): How many samples per batch to load.
            (default: :obj:`1`)
        shuffle (bool, optional): If set to :obj:`True`, the data will be
            reshuffled at every epoch (default: :obj:`True`)
    """
    def __init__(self, dataset, batch_size=1, shuffle=True, **kwargs):
        # Pass the staticmethod directly instead of wrapping it in a lambda:
        # lambdas cannot be pickled, which breaks num_workers > 0 under the
        # 'spawn' multiprocessing start method. Behavior is otherwise identical.
        super(DataLoaderSubstructContext, self).__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            collate_fn=BatchSubstructContext.from_data_list,
            **kwargs)
class DataLoaderMasking(torch.utils.data.DataLoader):
    r"""Data loader which merges data objects from a
    :class:`torch_geometric.data.dataset` into a :class:`BatchMasking`
    mini-batch.
    Args:
        dataset (Dataset): The dataset from which to load the data.
        batch_size (int, optional): How many samples per batch to load.
            (default: :obj:`1`)
        shuffle (bool, optional): If set to :obj:`True`, the data will be
            reshuffled at every epoch (default: :obj:`True`)
    """
    def __init__(self, dataset, batch_size=1, shuffle=True, **kwargs):
        # Pass the staticmethod directly instead of wrapping it in a lambda:
        # lambdas cannot be pickled, which breaks num_workers > 0 under the
        # 'spawn' multiprocessing start method. Behavior is otherwise identical.
        super(DataLoaderMasking, self).__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            collate_fn=BatchMasking.from_data_list,
            **kwargs)
class DataLoaderAE(torch.utils.data.DataLoader):
    r"""Data loader which merges data objects from a
    :class:`torch_geometric.data.dataset` into a :class:`BatchAE`
    mini-batch.
    Args:
        dataset (Dataset): The dataset from which to load the data.
        batch_size (int, optional): How many samples per batch to load.
            (default: :obj:`1`)
        shuffle (bool, optional): If set to :obj:`True`, the data will be
            reshuffled at every epoch (default: :obj:`True`)
    """
    def __init__(self, dataset, batch_size=1, shuffle=True, **kwargs):
        # Pass the staticmethod directly instead of wrapping it in a lambda:
        # lambdas cannot be pickled, which breaks num_workers > 0 under the
        # 'spawn' multiprocessing start method. Behavior is otherwise identical.
        super(DataLoaderAE, self).__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            collate_fn=BatchAE.from_data_list,
            **kwargs)
| 2,503 | 36.939394 | 89 | py |
GraphLoG | GraphLoG-main/model.py | import torch
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree, softmax
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
import torch.nn.functional as F
from torch_scatter import scatter_add
from torch_geometric.nn.inits import glorot, zeros
# Vocabulary sizes for the categorical node/edge features.
num_atom_type = 120 # including the extra mask tokens
num_chirality_tag = 3
num_bond_type = 6 # including aromatic and self-loop edge, and extra masked tokens
num_bond_direction = 3
class GINConv(MessagePassing):
    """
    Extension of GIN aggregation to incorporate edge information by addition.

    Args:
        emb_dim (int): dimensionality of embeddings for nodes and edges.
        aggr (str): neighborhood aggregation scheme (default: "add").

    See https://arxiv.org/abs/1810.00826
    """
    def __init__(self, emb_dim, aggr = "add"):
        super(GINConv, self).__init__()
        #multi-layer perceptron
        self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, emb_dim))
        # Separate embeddings for bond type and bond direction; summed below.
        self.edge_embedding1 = torch.nn.Embedding(num_bond_type, emb_dim)
        self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, emb_dim)
        torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
        torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
        self.aggr = aggr
    def forward(self, x, edge_index, edge_attr):
        # NOTE(review): written against an old torch_geometric API — this
        # assumes `add_self_loops` returns just edge_index (newer versions
        # return an (edge_index, edge_attr) tuple) and that `propagate` takes
        # the aggregation scheme as its first positional argument. Confirm
        # the pinned torch_geometric version before upgrading.
        #add self loops in the edge space
        edge_index = add_self_loops(edge_index, num_nodes = x.size(0))
        #add features corresponding to self-loop edges.
        self_loop_attr = torch.zeros(x.size(0), 2)
        self_loop_attr[:,0] = 4 #bond type for self-loop edge
        self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
        edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
        edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
        return self.propagate(self.aggr, edge_index, x=x, edge_attr=edge_embeddings)
    def message(self, x_j, edge_attr):
        # Neighbor message = node feature plus edge embedding.
        return x_j + edge_attr
    def update(self, aggr_out):
        # GIN applies an MLP to the aggregated neighborhood.
        return self.mlp(aggr_out)
class GCNConv(MessagePassing):
    """GCN layer with additive bond-type/direction edge embeddings and
    symmetric degree normalization."""
    def __init__(self, emb_dim, aggr = "add"):
        super(GCNConv, self).__init__()
        self.emb_dim = emb_dim
        self.linear = torch.nn.Linear(emb_dim, emb_dim)
        # Separate embeddings for bond type and bond direction; summed below.
        self.edge_embedding1 = torch.nn.Embedding(num_bond_type, emb_dim)
        self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, emb_dim)
        torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
        torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
        self.aggr = aggr
    def norm(self, edge_index, num_nodes, dtype):
        """Return the symmetric normalization D^{-1/2} A D^{-1/2} per edge."""
        ### assuming that self-loops have been already added in edge_index
        edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
                                 device=edge_index.device)
        row, col = edge_index
        deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow(-0.5)
        # Isolated nodes have degree 0 -> inf after pow; zero them out.
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
        return deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
    def forward(self, x, edge_index, edge_attr):
        # NOTE(review): written against an old torch_geometric API — this
        # assumes `add_self_loops` returns just edge_index (newer versions
        # return an (edge_index, edge_attr) tuple) and that `propagate` takes
        # the aggregation scheme as its first positional argument. Confirm
        # the pinned torch_geometric version before upgrading.
        #add self loops in the edge space
        edge_index = add_self_loops(edge_index, num_nodes = x.size(0))
        #add features corresponding to self-loop edges.
        self_loop_attr = torch.zeros(x.size(0), 2)
        self_loop_attr[:,0] = 4 #bond type for self-loop edge
        self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
        edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
        edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
        norm = self.norm(edge_index, x.size(0), x.dtype)
        x = self.linear(x)
        return self.propagate(self.aggr, edge_index, x=x, edge_attr=edge_embeddings, norm = norm)
    def message(self, x_j, edge_attr, norm):
        # Edge embedding is added before the degree normalization is applied.
        return norm.view(-1, 1) * (x_j + edge_attr)
class GATConv(MessagePassing):
    """Graph attention (GAT) layer with additive edge embeddings.

    Edge type/direction embeddings are added to the neighbor features
    before multi-head attention; head outputs are averaged in `update`.
    """
    def __init__(self, emb_dim, heads=2, negative_slope=0.2, aggr = "add"):
        super(GATConv, self).__init__()
        self.aggr = aggr
        self.emb_dim = emb_dim
        self.heads = heads
        self.negative_slope = negative_slope
        self.weight_linear = torch.nn.Linear(emb_dim, heads * emb_dim)
        # Attention coefficients over the concatenated (x_i, x_j) pair.
        self.att = torch.nn.Parameter(torch.Tensor(1, heads, 2 * emb_dim))
        self.bias = torch.nn.Parameter(torch.Tensor(emb_dim))
        self.edge_embedding1 = torch.nn.Embedding(num_bond_type, heads * emb_dim)
        self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, heads * emb_dim)
        torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
        torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
        self.reset_parameters()
    def reset_parameters(self):
        # Glorot for attention weights, zeros for the output bias.
        glorot(self.att)
        zeros(self.bias)
    def forward(self, x, edge_index, edge_attr):
        # NOTE(review): written against an old torch_geometric API — this
        # assumes `add_self_loops` returns just edge_index (newer versions
        # return an (edge_index, edge_attr) tuple) and that `propagate` takes
        # the aggregation scheme as its first positional argument. Confirm
        # the pinned torch_geometric version before upgrading.
        #add self loops in the edge space
        edge_index = add_self_loops(edge_index, num_nodes = x.size(0))
        #add features corresponding to self-loop edges.
        self_loop_attr = torch.zeros(x.size(0), 2)
        self_loop_attr[:,0] = 4 #bond type for self-loop edge
        self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
        edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
        edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
        # Project node features to (N, heads, emb_dim) for per-head attention.
        x = self.weight_linear(x).view(-1, self.heads, self.emb_dim)
        return self.propagate(self.aggr, edge_index, x=x, edge_attr=edge_embeddings)
    def message(self, edge_index, x_i, x_j, edge_attr):
        edge_attr = edge_attr.view(-1, self.heads, self.emb_dim)
        # In-place add of edge embeddings onto the neighbor messages.
        x_j += edge_attr
        # Per-head attention logits, softmax-normalized over each target node.
        alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index[0])
        return x_j * alpha.view(-1, self.heads, 1)
    def update(self, aggr_out):
        # Average the attention heads, then add the shared bias.
        aggr_out = aggr_out.mean(dim=1)
        aggr_out = aggr_out + self.bias
        return aggr_out
class GraphSAGEConv(MessagePassing):
    """GraphSAGE layer (mean aggregation by default) with additive edge
    embeddings and L2-normalized output."""
    def __init__(self, emb_dim, aggr = "mean"):
        super(GraphSAGEConv, self).__init__()
        self.emb_dim = emb_dim
        self.linear = torch.nn.Linear(emb_dim, emb_dim)
        # Separate embeddings for bond type and bond direction; summed below.
        self.edge_embedding1 = torch.nn.Embedding(num_bond_type, emb_dim)
        self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, emb_dim)
        torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
        torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
        self.aggr = aggr
    def forward(self, x, edge_index, edge_attr):
        # NOTE(review): written against an old torch_geometric API — this
        # assumes `add_self_loops` returns just edge_index (newer versions
        # return an (edge_index, edge_attr) tuple) and that `propagate` takes
        # the aggregation scheme as its first positional argument. Confirm
        # the pinned torch_geometric version before upgrading.
        #add self loops in the edge space
        edge_index = add_self_loops(edge_index, num_nodes = x.size(0))
        #add features corresponding to self-loop edges.
        self_loop_attr = torch.zeros(x.size(0), 2)
        self_loop_attr[:,0] = 4 #bond type for self-loop edge
        self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
        edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
        edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
        x = self.linear(x)
        return self.propagate(self.aggr, edge_index, x=x, edge_attr=edge_embeddings)
    def message(self, x_j, edge_attr):
        return x_j + edge_attr
    def update(self, aggr_out):
        # L2-normalize the aggregated representation (SAGE convention).
        return F.normalize(aggr_out, p = 2, dim = -1)
class GNN(torch.nn.Module):
    """
    Stack of message-passing layers producing node representations.

    Args:
        num_layer (int): the number of GNN layers
        emb_dim (int): dimensionality of embeddings
        JK (str): jumping-knowledge aggregation across layers:
            "last", "concat", "max" or "sum".
        drop_ratio (float): dropout rate
        gnn_type (str): gin, gcn, graphsage, gat

    Output:
        node representations
    """
    def __init__(self, num_layer, emb_dim, JK = "last", drop_ratio = 0, gnn_type = "gin"):
        super(GNN, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        # Separate embeddings for atom type and chirality tag; summed in forward.
        self.x_embedding1 = torch.nn.Embedding(num_atom_type, emb_dim)
        self.x_embedding2 = torch.nn.Embedding(num_chirality_tag, emb_dim)
        torch.nn.init.xavier_uniform_(self.x_embedding1.weight.data)
        torch.nn.init.xavier_uniform_(self.x_embedding2.weight.data)
        ### List of message-passing layers
        self.gnns = torch.nn.ModuleList()
        for layer in range(num_layer):
            if gnn_type == "gin":
                self.gnns.append(GINConv(emb_dim, aggr = "add"))
            elif gnn_type == "gcn":
                self.gnns.append(GCNConv(emb_dim))
            elif gnn_type == "gat":
                self.gnns.append(GATConv(emb_dim))
            elif gnn_type == "graphsage":
                self.gnns.append(GraphSAGEConv(emb_dim))
            else:
                # Fail fast: the original silently built an empty layer list
                # for unknown types, surfacing only later as an IndexError.
                raise ValueError("Invalid gnn_type: %s" % gnn_type)
        ### One BatchNorm per layer
        self.batch_norms = torch.nn.ModuleList()
        for layer in range(num_layer):
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
    def forward(self, *argv):
        """Accepts either (x, edge_index, edge_attr) or a single Data object;
        returns per-node representations."""
        if len(argv) == 3:
            x, edge_index, edge_attr = argv[0], argv[1], argv[2]
        elif len(argv) == 1:
            data = argv[0]
            x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
        else:
            raise ValueError("unmatched number of arguments.")
        x = self.x_embedding1(x[:,0]) + self.x_embedding2(x[:,1])
        h_list = [x]
        for layer in range(self.num_layer):
            h = self.gnns[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if layer == self.num_layer - 1:
                # remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
            h_list.append(h)
        ### Jumping-knowledge aggregation across layer outputs
        if self.JK == "concat":
            node_representation = torch.cat(h_list, dim = 1)
        elif self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "max":
            h_list = [h.unsqueeze_(0) for h in h_list]
            # torch.max over dim 0 returns (values, indices); keep the values.
            node_representation = torch.max(torch.cat(h_list, dim = 0), dim = 0)[0]
        elif self.JK == "sum":
            h_list = [h.unsqueeze_(0) for h in h_list]
            # BUG FIX: torch.sum returns a plain tensor; the original indexed
            # it with [0], which reduced the whole batch to node 0's embedding.
            node_representation = torch.sum(torch.cat(h_list, dim = 0), dim = 0)
        return node_representation
class GNN_graphpred(torch.nn.Module):
    """
    GNN with a graph-pooling head and a linear classifier for graph-level
    (multi-task) prediction.

    Args:
        num_layer (int): the number of GNN layers
        emb_dim (int): dimensionality of embeddings
        num_tasks (int): number of tasks in multi-task learning scenario
        JK (str): last, concat, max or sum.
        drop_ratio (float): dropout rate
        graph_pooling (str): sum, mean, max, attention, set2set
        gnn_type: gin, gcn, graphsage, gat

    See https://arxiv.org/abs/1810.00826
    JK-net: https://arxiv.org/abs/1806.03536
    """
    def __init__(self, num_layer, emb_dim, num_tasks, JK = "last", drop_ratio = 0, graph_pooling = "mean", gnn_type = "gin"):
        super(GNN_graphpred, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.gnn = GNN(num_layer, emb_dim, JK, drop_ratio, gnn_type = gnn_type)
        # Graph-level pooling over node representations.
        if graph_pooling == "sum":
            self.pool = global_add_pool
        elif graph_pooling == "mean":
            self.pool = global_mean_pool
        elif graph_pooling == "max":
            self.pool = global_max_pool
        elif graph_pooling == "attention":
            # With JK == "concat" the node representation is
            # (num_layer + 1) * emb_dim wide.
            if self.JK == "concat":
                self.pool = GlobalAttention(gate_nn = torch.nn.Linear((self.num_layer + 1) * emb_dim, 1))
            else:
                self.pool = GlobalAttention(gate_nn = torch.nn.Linear(emb_dim, 1))
        elif graph_pooling[:-1] == "set2set":
            # e.g. "set2set2" -> 2 processing iterations.
            set2set_iter = int(graph_pooling[-1])
            if self.JK == "concat":
                self.pool = Set2Set((self.num_layer + 1) * emb_dim, set2set_iter)
            else:
                self.pool = Set2Set(emb_dim, set2set_iter)
        else:
            raise ValueError("Invalid graph pooling type.")
        # Set2Set doubles the width of the pooled representation.
        if graph_pooling[:-1] == "set2set":
            self.mult = 2
        else:
            self.mult = 1
        # For graph-level prediction: a single linear head shared by both JK
        # modes (the original duplicated this construction in each branch).
        if self.JK == "concat":
            rep_dim = self.mult * (self.num_layer + 1) * self.emb_dim
        else:
            rep_dim = self.mult * self.emb_dim
        self.graph_pred_linear = torch.nn.Linear(rep_dim, self.num_tasks)
    def from_pretrained(self, model_file):
        """Load pre-trained GNN weights from `model_file` into `self.gnn`.

        map_location='cpu' makes checkpoints that were saved on GPU loadable
        on CPU-only machines; `load_state_dict` then copies the tensors onto
        whatever device the model currently lives on.
        """
        self.gnn.load_state_dict(torch.load(model_file, map_location='cpu'))
    def forward(self, *argv):
        """Accepts (x, edge_index, edge_attr, batch) or a single Data object;
        returns per-graph task logits of shape (num_graphs, num_tasks)."""
        if len(argv) == 4:
            x, edge_index, edge_attr, batch = argv[0], argv[1], argv[2], argv[3]
        elif len(argv) == 1:
            data = argv[0]
            x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
        else:
            raise ValueError("unmatched number of arguments.")
        node_representation = self.gnn(x, edge_index, edge_attr)
        return self.graph_pred_linear(self.pool(node_representation, batch))
class ProjectNet(torch.nn.Module):
def __init__(self, rep_dim):
super(ProjectNet, self).__init__()
self.rep_dim = rep_dim
self.proj = torch.nn.Sequential(
torch.nn.Linear(self.rep_dim, self.rep_dim),
torch.nn.ReLU(),
torch.nn.Linear(self.rep_dim, self.rep_dim)
)
def forward(self, x):
x_proj = self.proj(x)
return x_proj
if __name__ == "__main__":
    pass  # module is import-only; no script entry point
| 15,224 | 36.967581 | 129 | py |
GraphLoG | GraphLoG-main/finetune.py | import argparse
from loader import MoleculeDataset
from torch_geometric.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from tqdm import tqdm
import os, sys
import numpy as np
import random
from model import GNN, GNN_graphpred
from sklearn.metrics import roc_auc_score
from splitters import scaffold_split
import pandas as pd
import os
import shutil
from tensorboardX import SummaryWriter
# Per-element BCE loss; masking of missing targets and averaging are done
# manually in train()/eval(), hence reduction="none".
criterion = nn.BCEWithLogitsLoss(reduction = "none")
def train(args, model, device, loader, optimizer, scheduler):
    """Run one fine-tuning epoch over `loader`, then advance the LR scheduler.

    Labels are expected in {-1, 0, +1}, where 0 marks a missing target; the
    loss is averaged over non-missing entries only.
    """
    model.train()
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)
        pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
        y = batch.y.view(pred.shape).to(torch.float64)
        #Whether y is non-null or not.
        is_valid = y**2 > 0
        #Loss matrix: map labels {-1,+1} -> {0,1} for BCE.
        loss_mat = criterion(pred.double(), (y+1)/2)
        #loss matrix after removing null target
        loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))
        optimizer.zero_grad()
        loss = torch.sum(loss_mat)/torch.sum(is_valid)
        loss.backward()
        optimizer.step()
    # BUG FIX: step the scheduler once per epoch AFTER the optimizer updates.
    # The original called scheduler.step() at the top of train(), before any
    # optimizer.step(), which skips the initial learning rate (PyTorch warns
    # about this ordering since 1.1).
    scheduler.step()
def eval(args, model, device, loader):
    """Evaluate `model` on `loader` and return the mean ROC-AUC over tasks.

    Labels are expected in {-1, 0, +1}, where 0 marks a missing target; the
    AUC of a task is computed over its non-missing entries and only when both
    classes are present for that task.
    """
    model.eval()
    y_true = []
    y_scores = []
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)
        with torch.no_grad():
            pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
        y_true.append(batch.y.view(pred.shape))
        y_scores.append(pred)
    y_true = torch.cat(y_true, dim = 0).cpu().numpy()
    y_scores = torch.cat(y_scores, dim = 0).cpu().numpy()
    roc_list = []
    for i in range(y_true.shape[1]):
        #AUC is only defined when there is at least one positive data.
        if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == -1) > 0:
            is_valid = y_true[:,i]**2 > 0
            roc_list.append(roc_auc_score((y_true[is_valid,i] + 1)/2, y_scores[is_valid,i]))
    if len(roc_list) < y_true.shape[1]:
        print("Some target is missing!")
        print("Missing ratio: %f" %(1 - float(len(roc_list))/y_true.shape[1]))
    if not roc_list:
        # BUG FIX: guard the mean below — the original divided by zero when
        # no task had both a positive and a negative label.
        return float('nan')
    return sum(roc_list)/len(roc_list)
def main():
    """Fine-tune a (optionally pre-trained) GNN on a molecular property
    classification benchmark over several independent runs and report the
    mean/std test ROC-AUC."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=100,
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--num_run', type=int, default=5,
                        help='number of independent runs (default: 5)')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='learning rate (default: 0.001)')
    parser.add_argument('--lr_scale', type=float, default=1,
                        help='relative learning rate for the feature extraction layer (default: 1)')
    parser.add_argument('--frozen', action='store_true', default=False,
                        help='whether to freeze gnn extractor')
    parser.add_argument('--decay', type=float, default=0,
                        help='weight decay (default: 0)')
    parser.add_argument('--num_layer', type=int, default=5,
                        help='number of GNN message passing layers (default: 5).')
    parser.add_argument('--emb_dim', type=int, default=300,
                        help='embedding dimensions (default: 300)')
    parser.add_argument('--dropout_ratio', type=float, default=0.5,
                        help='dropout ratio (default: 0.5)')
    parser.add_argument('--graph_pooling', type=str, default="mean",
                        help='graph level pooling (sum, mean, max, set2set, attention)')
    parser.add_argument('--JK', type=str, default="last",
                        help='how the node features across layers are combined. last, sum, max or concat')
    parser.add_argument('--gnn_type', type=str, default="gin")
    parser.add_argument('--dataset', type=str, default = 'bbbp', help='root directory of dataset. For now, only classification.')
    parser.add_argument('--input_model_file', type=str, default = '', help='filename to read the model (if there is any)')
    parser.add_argument('--filename', type=str, default = '', help='output filename')
    parser.add_argument('--seed', type=int, default=None, help = "Seed for splitting the dataset.")
    parser.add_argument('--runseed', type=int, default=None, help = "Seed for minibatch selection, random initialization.")
    parser.add_argument('--split', type = str, default="scaffold", help = "random or scaffold or random_scaffold")
    parser.add_argument('--eval_train', type=int, default = 0, help='evaluating training or not')
    parser.add_argument('--num_workers', type=int, default = 1, help='number of workers for dataset loading')
    args = parser.parse_args()
    # Split seed: fixed when provided, otherwise drawn at random.
    # NOTE(review): a seed of 0 is treated as "not provided" by this truthiness
    # check — confirm whether seed=0 should be honored.
    if args.seed:
        seed = args.seed
        print ('Manual seed: ', seed)
    else:
        seed = random.randint(0, 10000)
        print ('Random seed: ', seed)
    # Bunch of classification tasks
    if args.dataset == "tox21":
        num_tasks = 12
    elif args.dataset == "hiv":
        num_tasks = 1
    elif args.dataset == "pcba":
        num_tasks = 128
    elif args.dataset == "muv":
        num_tasks = 17
    elif args.dataset == "bace":
        num_tasks = 1
    elif args.dataset == "bbbp":
        num_tasks = 1
    elif args.dataset == "toxcast":
        num_tasks = 617
    elif args.dataset == "sider":
        num_tasks = 27
    elif args.dataset == "clintox":
        num_tasks = 2
    else:
        raise ValueError("Invalid dataset name.")
    # set up dataset
    dataset = MoleculeDataset("./dataset/" + args.dataset, dataset=args.dataset)
    print(dataset)
    if args.split == "scaffold":
        smiles_list = pd.read_csv('./dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
        train_dataset, valid_dataset, test_dataset = scaffold_split(dataset, smiles_list, null_value=0,
                                                                    frac_train=0.8, frac_valid=0.1, frac_test=0.1)
        print("scaffold")
    elif args.split == "random":
        # NOTE(review): random_split is not imported in this file — this
        # branch raises NameError; confirm the intended import.
        train_dataset, valid_dataset, test_dataset = random_split(dataset, null_value=0, frac_train=0.8,
                                                                  frac_valid=0.1, frac_test=0.1, seed=seed)
        print("random")
    elif args.split == "random_scaffold":
        # NOTE(review): random_scaffold_split is not imported in this file —
        # this branch raises NameError; confirm the intended import.
        smiles_list = pd.read_csv('./dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
        train_dataset, valid_dataset, test_dataset = random_scaffold_split(dataset, smiles_list, null_value=0,
                                                                           frac_train=0.8, frac_valid=0.1,
                                                                           frac_test=0.1, seed=seed)
        print("random scaffold")
    else:
        raise ValueError("Invalid split option.")
    print(train_dataset[0])
    # run multiple times
    best_valid_auc_list = []
    last_epoch_auc_list = []
    for run_idx in range(args.num_run):
        print ('\nRun ', run_idx + 1)
        # Per-run seed for minibatch order and weight initialization.
        if args.runseed:
            runseed = args.runseed
            print('Manual runseed: ', runseed)
        else:
            runseed = random.randint(0, 10000)
            print('Random runseed: ', runseed)
        torch.manual_seed(runseed)
        np.random.seed(runseed)
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(runseed)
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
        val_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
        test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
        # set up model
        model = GNN_graphpred(args.num_layer, args.emb_dim, num_tasks, JK=args.JK, drop_ratio=args.dropout_ratio,
                              graph_pooling=args.graph_pooling, gnn_type=args.gnn_type)
        if not args.input_model_file == "":
            model.from_pretrained(args.input_model_file)
        model.to(device)
        # set up optimizer
        # different learning rate for different part of GNN
        model_param_group = []
        if args.frozen:
            # lr 0 effectively freezes the pre-trained extractor.
            model_param_group.append({"params": model.gnn.parameters(), "lr": 0})
        else:
            model_param_group.append({"params": model.gnn.parameters()})
        if args.graph_pooling == "attention":
            model_param_group.append({"params": model.pool.parameters(), "lr": args.lr * args.lr_scale})
        model_param_group.append({"params": model.graph_pred_linear.parameters(), "lr": args.lr * args.lr_scale})
        optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
        print(optimizer)
        scheduler = StepLR(optimizer, step_size=30, gamma=0.3)
        # run fine-tuning
        best_valid = 0
        best_valid_test = 0
        last_epoch_test = 0
        for epoch in range(1, args.epochs + 1):
            print("====epoch " + str(epoch), " lr: ", optimizer.param_groups[-1]['lr'])
            train(args, model, device, train_loader, optimizer, scheduler)
            print("====Evaluation")
            if args.eval_train:
                train_acc = eval(args, model, device, train_loader)
            else:
                print("omit the training accuracy computation")
                train_acc = 0
            val_acc = eval(args, model, device, val_loader)
            test_acc = eval(args, model, device, test_loader)
            # Track test AUC at the best-validation epoch and at the last epoch.
            if val_acc > best_valid:
                best_valid = val_acc
                best_valid_test = test_acc
            if epoch == args.epochs:
                last_epoch_test = test_acc
            print("train: %f val: %f test: %f" % (train_acc, val_acc, test_acc))
            print("")
        best_valid_auc_list.append(best_valid_test)
        last_epoch_auc_list.append(last_epoch_test)
    # summarize results
    best_valid_auc_list = np.array(best_valid_auc_list)
    last_epoch_auc_list = np.array(last_epoch_auc_list)
    if args.dataset in ["muv", "hiv"]:
        print('Best validation epoch:')
        print('Mean: {}\tStd: {}'.format(np.mean(best_valid_auc_list), np.std(best_valid_auc_list)))
    else:
        print('Last epoch:')
        print('Mean: {}\tStd: {}'.format(np.mean(last_epoch_auc_list), np.std(last_epoch_auc_list)))
    # NOTE(review): 'watch nvidia-smi' blocks the process indefinitely and
    # looks like leftover debugging — confirm whether it should be removed.
    os.system('watch nvidia-smi')
if __name__ == "__main__":
    main()  # script entry point
| 11,223 | 40.724907 | 129 | py |
GraphLoG | GraphLoG-main/splitters.py | import torch
import random
import numpy as np
from itertools import compress
from rdkit.Chem.Scaffolds import MurckoScaffold
from collections import defaultdict
from sklearn.model_selection import StratifiedKFold
# splitter function
def generate_scaffold(smiles, include_chirality=False):
    """Return the Bemis-Murcko scaffold SMILES for a molecule.

    :param smiles: SMILES string of the input molecule
    :param include_chirality: keep chirality information in the scaffold
    :return: SMILES string of the scaffold
    """
    return MurckoScaffold.MurckoScaffoldSmiles(
        smiles=smiles, includeChirality=include_chirality)
# # test generate_scaffold
# s = 'Cc1cc(Oc2nccc(CCC)c2)ccc1'
# scaffold = generate_scaffold(s)
# assert scaffold == 'c1ccc(Oc2ccccn2)cc1'
def scaffold_split(dataset, smiles_list, task_idx=None, null_value=0,
                   frac_train=0.8, frac_valid=0.1, frac_test=0.1,
                   return_smiles=False):
    """
    Adapted from https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py
    Split dataset by Bemis-Murcko scaffolds
    This function can also ignore examples containing null values for a
    selected task when splitting. Deterministic split
    :param dataset: pytorch geometric dataset obj
    :param smiles_list: list of smiles corresponding to the dataset obj
    :param task_idx: column idx of the data.y tensor. Will filter out
    examples with null value in specified task column of the data.y tensor
    prior to splitting. If None, then no filtering
    :param null_value: float that specifies null value in data.y to filter if
    task_idx is provided
    :param frac_train:
    :param frac_valid:
    :param frac_test:
    :param return_smiles:
    :return: train, valid, test slices of the input dataset obj. If
    return_smiles = True, also returns ([train_smiles_list],
    [valid_smiles_list], [test_smiles_list])
    """
    np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0)
    if task_idx != None:
        # filter based on null values in task_idx
        # get task array
        y_task = np.array([data.y[task_idx].item() for data in dataset])
        # boolean array that correspond to non null values
        non_null = y_task != null_value
        # smiles_list becomes a list of (original_index, smiles) pairs.
        smiles_list = list(compress(enumerate(smiles_list), non_null))
    else:
        non_null = np.ones(len(dataset)) == 1
        smiles_list = list(compress(enumerate(smiles_list), non_null))
    # create dict of the form {scaffold_i: [idx1, idx....]}
    all_scaffolds = {}
    for i, smiles in smiles_list:
        scaffold = generate_scaffold(smiles, include_chirality=True)
        if scaffold not in all_scaffolds:
            all_scaffolds[scaffold] = [i]
        else:
            all_scaffolds[scaffold].append(i)
    # sort from largest to smallest sets
    all_scaffolds = {key: sorted(value) for key, value in all_scaffolds.items()}
    all_scaffold_sets = [
        scaffold_set for (scaffold, scaffold_set) in sorted(
            all_scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
    ]
    # get train, valid test indices
    train_cutoff = frac_train * len(smiles_list)
    valid_cutoff = (frac_train + frac_valid) * len(smiles_list)
    train_idx, valid_idx, test_idx = [], [], []
    # Greedy fill: whole scaffold groups go to train until the cutoff, then
    # valid, then test — so no scaffold is shared across splits.
    for scaffold_set in all_scaffold_sets:
        if len(train_idx) + len(scaffold_set) > train_cutoff:
            if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff:
                test_idx.extend(scaffold_set)
            else:
                valid_idx.extend(scaffold_set)
        else:
            train_idx.extend(scaffold_set)
    assert len(set(train_idx).intersection(set(valid_idx))) == 0
    assert len(set(test_idx).intersection(set(valid_idx))) == 0
    train_dataset = dataset[torch.tensor(train_idx)]
    valid_dataset = dataset[torch.tensor(valid_idx)]
    test_dataset = dataset[torch.tensor(test_idx)]
    if not return_smiles:
        return train_dataset, valid_dataset, test_dataset
    else:
        # NOTE(review): when task_idx filtering removed entries, smiles_list
        # is a compressed list while train_idx holds ORIGINAL dataset indices,
        # so smiles_list[i] may not correspond to example i — confirm this
        # path is only used with task_idx=None.
        train_smiles = [smiles_list[i][1] for i in train_idx]
        valid_smiles = [smiles_list[i][1] for i in valid_idx]
        test_smiles = [smiles_list[i][1] for i in test_idx]
        return train_dataset, valid_dataset, test_dataset, (train_smiles,
                                                            valid_smiles,
                                                            test_smiles)
def random_scaffold_split(dataset, smiles_list, task_idx=None, null_value=0,
                          frac_train=0.8, frac_valid=0.1, frac_test=0.1, seed=0):
    """
    Adapted from https://github.com/pfnet-research/chainer-chemistry/blob/master/chainer_chemistry/dataset/splitters/scaffold_splitter.py
    Split dataset by Bemis-Murcko scaffolds, assigning whole scaffold groups
    to train/valid/test after a seeded random permutation of the groups
    (random, but reproducible for a fixed seed).
    This function can also ignore examples containing null values for a
    selected task when splitting.
    :param dataset: pytorch geometric dataset obj
    :param smiles_list: list of smiles corresponding to the dataset obj
    :param task_idx: column idx of the data.y tensor. Will filter out
    examples with null value in specified task column of the data.y tensor
    prior to splitting. If None, then no filtering
    :param null_value: float that specifies null value in data.y to filter if
    task_idx is provided
    :param frac_train: fraction of examples assigned to the train split
    :param frac_valid: fraction of examples assigned to the valid split
    :param frac_test: fraction of examples assigned to the test split
    :param seed: seed for the random permutation of scaffold groups
    :return: train, valid, test slices of the input dataset obj
    """
    np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0)

    if task_idx is not None:
        # filter based on null values in task_idx: keep only examples whose
        # label for the selected task is non-null
        y_task = np.array([data.y[task_idx].item() for data in dataset])
        non_null = y_task != null_value  # boolean mask of usable examples
    else:
        non_null = np.ones(len(dataset)) == 1
    # keep (original_index, smiles) pairs for the retained examples
    smiles_list = list(compress(enumerate(smiles_list), non_null))

    rng = np.random.RandomState(seed)

    # group example indices by their (chirality-aware) Bemis-Murcko scaffold
    scaffolds = defaultdict(list)
    for ind, smiles in smiles_list:
        scaffold = generate_scaffold(smiles, include_chirality=True)
        scaffolds[scaffold].append(ind)

    # randomly permute the scaffold groups, then greedily fill valid and
    # test up to their budgets; everything else goes to train
    scaffold_sets = rng.permutation(list(scaffolds.values()))

    n_total_valid = int(np.floor(frac_valid * len(dataset)))
    n_total_test = int(np.floor(frac_test * len(dataset)))

    train_idx = []
    valid_idx = []
    test_idx = []

    for scaffold_set in scaffold_sets:
        if len(valid_idx) + len(scaffold_set) <= n_total_valid:
            valid_idx.extend(scaffold_set)
        elif len(test_idx) + len(scaffold_set) <= n_total_test:
            test_idx.extend(scaffold_set)
        else:
            train_idx.extend(scaffold_set)

    train_dataset = dataset[torch.tensor(train_idx)]
    valid_dataset = dataset[torch.tensor(valid_idx)]
    test_dataset = dataset[torch.tensor(test_idx)]

    return train_dataset, valid_dataset, test_dataset
def random_split(dataset, task_idx=None, null_value=0,
                 frac_train=0.8, frac_valid=0.1, frac_test=0.1, seed=0,
                 smiles_list=None):
    """
    Randomly shuffle the example indices and split them into
    train/valid/test slices (reproducible for a fixed seed).
    :param dataset: pytorch geometric dataset obj
    :param task_idx: column idx of the data.y tensor. Will filter out
    examples with null value in specified task column of the data.y tensor
    prior to splitting. If None, then no filtering
    :param null_value: float that specifies null value in data.y to filter if
    task_idx is provided
    :param frac_train: fraction of examples assigned to the train split
    :param frac_valid: fraction of examples assigned to the valid split
    :param frac_test: fraction of examples assigned to the test split
    :param seed: seed for the random shuffle
    :param smiles_list: list of smiles corresponding to the dataset obj, or None
    :return: train, valid, test slices of the input dataset obj. If
    smiles_list != None, also returns ([train_smiles_list],
    [valid_smiles_list], [test_smiles_list])
    """
    np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0)

    if task_idx is not None:
        # filter based on null values in task_idx: keep only examples
        # containing non-null labels in the specified task column
        y_task = np.array([data.y[task_idx].item() for data in dataset])
        non_null = y_task != null_value  # boolean mask of non-null labels
        idx_array = np.where(non_null)[0]
        dataset = dataset[torch.tensor(idx_array)]

    num_mols = len(dataset)
    random.seed(seed)
    all_idx = list(range(num_mols))
    random.shuffle(all_idx)

    # cut points for the shuffled index list (hoisted so each fraction is
    # truncated exactly once, as in the original slicing arithmetic)
    n_train = int(frac_train * num_mols)
    n_valid = int(frac_valid * num_mols)
    train_idx = all_idx[:n_train]
    valid_idx = all_idx[n_train:n_train + n_valid]
    test_idx = all_idx[n_train + n_valid:]

    assert len(set(train_idx).intersection(set(valid_idx))) == 0
    assert len(set(valid_idx).intersection(set(test_idx))) == 0
    assert len(train_idx) + len(valid_idx) + len(test_idx) == num_mols

    train_dataset = dataset[torch.tensor(train_idx)]
    valid_dataset = dataset[torch.tensor(valid_idx)]
    test_dataset = dataset[torch.tensor(test_idx)]

    if not smiles_list:
        return train_dataset, valid_dataset, test_dataset
    train_smiles = [smiles_list[i] for i in train_idx]
    valid_smiles = [smiles_list[i] for i in valid_idx]
    test_smiles = [smiles_list[i] for i in test_idx]
    return train_dataset, valid_dataset, test_dataset, (train_smiles,
                                                        valid_smiles,
                                                        test_smiles)
def cv_random_split(dataset, fold_idx = 0,
                    frac_train=0.9, frac_valid=0.1, seed=0,
                    smiles_list=None):
    """
    Stratified 10-fold cross-validation split on the scalar labels data.y.
    :param dataset: pytorch geometric dataset obj whose data.y holds a
    single scalar class label per example
    :param fold_idx: which of the 10 folds to use as the validation set
    :param frac_train: expected train fraction; only checked for consistency
    (the split itself always uses 10 folds, i.e. a 0.9/0.1 split)
    :param frac_valid: expected valid fraction (see frac_train)
    :param seed: seed for shuffling inside StratifiedKFold
    :param smiles_list: unused; kept for signature compatibility with the
    other split functions in this module
    :return: train, valid slices of the input dataset obj
    """
    np.testing.assert_almost_equal(frac_train + frac_valid, 1.0)

    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)

    labels = [data.y.item() for data in dataset]

    # materialize all (train, valid) index pairs, then pick the requested fold
    idx_list = list(skf.split(np.zeros(len(labels)), labels))
    train_idx, val_idx = idx_list[fold_idx]

    train_dataset = dataset[torch.tensor(train_idx)]
    valid_dataset = dataset[torch.tensor(val_idx)]

    return train_dataset, valid_dataset
if __name__ == "__main__":
    from loader import MoleculeDataset
    from rdkit import Chem
    import pandas as pd

    def _check_split_covers_dataset(full_dataset, splits):
        # the splits together must cover every example exactly once
        # (no missing and no overlapping examples)
        ids = []
        for split in splits:
            ids.extend(split.data.id.tolist())
        assert len(set(ids)) == len(full_dataset)

    def _check_smiles_match(split_dataset, split_smiles):
        # each graph must pair up with a SMILES of the same atom count
        assert len(split_dataset) == len(split_smiles)
        for idx in range(len(split_dataset)):
            n_graph_atoms = split_dataset[idx].x.size()[0]
            mol = Chem.MolFromSmiles(split_smiles[idx])
            assert n_graph_atoms == len(list(mol.GetAtoms()))

    # smoke-test scaffold_split on tox21
    dataset = MoleculeDataset('dataset/tox21', dataset='tox21')
    smiles_list = pd.read_csv('dataset/tox21/processed/smiles.csv',
                              header=None)[0].tolist()
    train_dataset, valid_dataset, test_dataset = scaffold_split(
        dataset, smiles_list, task_idx=None, null_value=0,
        frac_train=0.8, frac_valid=0.1, frac_test=0.1)
    # train_dataset, valid_dataset, test_dataset = random_scaffold_split(
    #     dataset, smiles_list, task_idx=None, null_value=0,
    #     frac_train=0.8, frac_valid=0.1, frac_test=0.1, seed=0)
    _check_split_covers_dataset(dataset,
                                (train_dataset, valid_dataset, test_dataset))

    # smoke-test scaffold_split with smiles returned, on bbbp
    dataset = MoleculeDataset('dataset/bbbp', dataset='bbbp')
    smiles_list = pd.read_csv('dataset/bbbp/processed/smiles.csv',
                              header=None)[0].tolist()
    train_dataset, valid_dataset, test_dataset, (train_smiles, valid_smiles,
                                                 test_smiles) = \
        scaffold_split(dataset, smiles_list, task_idx=None, null_value=0,
                       frac_train=0.8, frac_valid=0.1, frac_test=0.1,
                       return_smiles=True)
    _check_smiles_match(train_dataset, train_smiles)
    _check_smiles_match(valid_dataset, valid_smiles)
    _check_smiles_match(test_dataset, test_smiles)

    # smoke-test random_split on tox21
    dataset = MoleculeDataset('dataset/tox21', dataset='tox21')
    train_dataset, valid_dataset, test_dataset = random_split(
        dataset, task_idx=None, null_value=0,
        frac_train=0.8, frac_valid=0.1, frac_test=0.1)
    _check_split_covers_dataset(dataset,
                                (train_dataset, valid_dataset, test_dataset))

    # smoke-test random_split with smiles returned, on bbbp
    dataset = MoleculeDataset('dataset/bbbp', dataset='bbbp')
    smiles_list = pd.read_csv('dataset/bbbp/processed/smiles.csv',
                              header=None)[0].tolist()
    train_dataset, valid_dataset, test_dataset, (train_smiles, valid_smiles,
                                                 test_smiles) = \
        random_split(dataset, task_idx=None, null_value=0,
                     frac_train=0.8, frac_valid=0.1, frac_test=0.1, seed=42,
                     smiles_list=smiles_list)
    _check_smiles_match(train_dataset, train_smiles)
    _check_smiles_match(valid_dataset, valid_smiles)
    _check_smiles_match(test_dataset, test_smiles)
| 14,949 | 41.351275 | 179 | py |
GraphLoG | GraphLoG-main/util.py | import torch
import copy
import random
import networkx as nx
import numpy as np
from torch_geometric.utils import convert
from loader import graph_data_obj_to_nx_simple, nx_to_graph_data_obj_simple
from rdkit import Chem
from rdkit.Chem import AllChem
from loader import mol_to_graph_data_obj_simple, \
graph_data_obj_to_mol_simple
from loader import MoleculeDataset
def check_same_molecules(s1, s2):
    """Return True iff the two SMILES strings denote the same molecule.

    Equality is decided by comparing the canonical InChI of both parses.
    """
    inchis = [AllChem.MolToInchi(AllChem.MolFromSmiles(s)) for s in (s1, s2)]
    return inchis[0] == inchis[1]
class NegativeEdge:
def __init__(self):
"""
Randomly sample negative edges
"""
pass
def __call__(self, data):
num_nodes = data.num_nodes
num_edges = data.num_edges
edge_set = set([str(data.edge_index[0, i].cpu().item()) + "," + str(
data.edge_index[1, i].cpu().item()) for i in
range(data.edge_index.shape[1])])
redandunt_sample = torch.randint(0, num_nodes, (2, 5 * num_edges))
sampled_ind = []
sampled_edge_set = set([])
for i in range(5 * num_edges):
node1 = redandunt_sample[0, i].cpu().item()
node2 = redandunt_sample[1, i].cpu().item()
edge_str = str(node1) + "," + str(node2)
if not edge_str in edge_set and not edge_str in sampled_edge_set and not node1 == node2:
sampled_edge_set.add(edge_str)
sampled_ind.append(i)
if len(sampled_ind) == num_edges / 2:
break
data.negative_edge_index = redandunt_sample[:, sampled_ind]
return data
class ExtractSubstructureContextPair:
    """Transform that augments a graph with a (substructure, context) pair.

    The substructure is the k-hop neighbourhood of a (randomly chosen) root
    atom; the context is the set of nodes between l1 and l2 hops from the
    same root. Both are stored as extra attributes on the data object,
    together with the overlap indices that link the two.
    """

    def __init__(self, k, l1, l2):
        """
        Randomly selects a node from the data object, and adds attributes
        that contain the substructure that corresponds to k hop neighbours
        rooted at the node, and the context substructures that corresponds to
        the subgraph that is between l1 and l2 hops away from the
        root node.
        :param k: radius (in hops) of the substructure neighbourhood
        :param l1: inner radius (in hops) of the context region
        :param l2: outer radius (in hops) of the context region
        """
        self.k = k
        self.l1 = l1
        self.l2 = l2

        # for the special case of 0, addresses the quirk with
        # single_source_shortest_path_length -- presumably a radius of 0
        # should select no nodes rather than the root itself; confirm
        # against the networkx version in use
        if self.k == 0:
            self.k = -1

        if self.l1 == 0:
            self.l1 = -1

        if self.l2 == 0:
            self.l2 = -1

    def __call__(self, data, root_idx=None):
        """
        :param data: pytorch geometric data object
        :param root_idx: If None, then randomly samples an atom idx.
        Otherwise sets atom idx of root (for debugging only)
        :return: None. Creates new attributes in original data object:
        data.center_substruct_idx
        data.x_substruct
        data.edge_attr_substruct
        data.edge_index_substruct
        data.x_context
        data.edge_attr_context
        data.edge_index_context
        data.overlap_context_substruct_idx
        NOTE(review): if the substructure (or context) region is empty, the
        corresponding attributes are simply not set on the data object.
        """
        num_atoms = data.x.size()[0]
        if root_idx == None:
            root_idx = random.sample(range(num_atoms), 1)[0]

        G = graph_data_obj_to_nx_simple(data)  # same ordering as input data obj

        # Get k-hop subgraph rooted at specified atom idx
        substruct_node_idxes = nx.single_source_shortest_path_length(G,
                                                                     root_idx,
                                                                     self.k).keys()
        if len(substruct_node_idxes) > 0:
            substruct_G = G.subgraph(substruct_node_idxes)
            substruct_G, substruct_node_map = reset_idxes(substruct_G)  # need
            # to reset node idx to 0 -> num_nodes - 1, otherwise data obj does not
            # make sense, since the node indices in data obj must start at 0
            substruct_data = nx_to_graph_data_obj_simple(substruct_G)
            data.x_substruct = substruct_data.x
            data.edge_attr_substruct = substruct_data.edge_attr
            data.edge_index_substruct = substruct_data.edge_index
            data.center_substruct_idx = torch.tensor([substruct_node_map[
                root_idx]])  # need
            # to convert center idx from original graph node ordering to the
            # new substruct node ordering

        # Get subgraphs that is between l1 and l2 hops away from the root node
        l1_node_idxes = nx.single_source_shortest_path_length(G, root_idx,
                                                              self.l1).keys()
        l2_node_idxes = nx.single_source_shortest_path_length(G, root_idx,
                                                              self.l2).keys()
        # symmetric difference of the two balls = the "ring" between l1 and l2
        context_node_idxes = set(l1_node_idxes).symmetric_difference(
            set(l2_node_idxes))
        if len(context_node_idxes) > 0:
            context_G = G.subgraph(context_node_idxes)
            context_G, context_node_map = reset_idxes(context_G)  # need to
            # reset node idx to 0 -> num_nodes - 1, otherwise data obj does not
            # make sense, since the node indices in data obj must start at 0
            context_data = nx_to_graph_data_obj_simple(context_G)
            data.x_context = context_data.x
            data.edge_attr_context = context_data.edge_attr
            data.edge_index_context = context_data.edge_index

        # Get indices of overlapping nodes between substruct and context,
        # WRT context ordering (context_node_map is only defined when the
        # context is non-empty, but then the overlap below is empty too, so
        # the guarded block is never entered without it)
        context_substruct_overlap_idxes = list(set(
            context_node_idxes).intersection(set(substruct_node_idxes)))
        if len(context_substruct_overlap_idxes) > 0:
            context_substruct_overlap_idxes_reorder = [context_node_map[old_idx]
                                                       for
                                                       old_idx in
                                                       context_substruct_overlap_idxes]
            # need to convert the overlap node idxes, which is from the
            # original graph node ordering to the new context node ordering
            data.overlap_context_substruct_idx = \
                torch.tensor(context_substruct_overlap_idxes_reorder)

        return data

    def __repr__(self):
        return '{}(k={},l1={}, l2={})'.format(self.__class__.__name__, self.k,
                                              self.l1, self.l2)
def reset_idxes(G):
    """
    Relabel the nodes of G so they are numbered from 0 to num_nodes - 1.
    :param G: networkx graph obj
    :return: (copy of G with relabelled node indices, old->new index mapping)
    """
    mapping = {old_idx: new_idx for new_idx, old_idx in enumerate(G.nodes())}
    return nx.relabel_nodes(G, mapping, copy=True), mapping
class MaskAtom:
    def __init__(self, num_atom_type, num_edge_type, mask_rate, mask_num=0, mask_edge=True):
        """
        Randomly masks an atom, and optionally masks edges connecting to it.
        The mask atom type index is num_possible_atom_type
        The mask edge type index in num_possible_edge_type
        :param num_atom_type: number of real atom types; this value itself is
        used as the mask token's type index
        :param num_edge_type: number of real edge types; this value itself is
        used as the mask token's type index
        :param mask_rate: % of atoms to be masked (used when mask_num == 0)
        :param mask_num: exact number of atoms to mask; 0 means "use mask_rate"
        :param mask_edge: If True, also mask the edges that connect to the
        masked atoms
        """
        self.num_atom_type = num_atom_type
        self.num_edge_type = num_edge_type
        self.mask_rate = mask_rate
        self.mask_num = mask_num
        self.mask_edge = mask_edge

    def __call__(self, data, masked_atom_indices=None):
        """
        :param data: pytorch geometric data object. Assume that the edge
        ordering is the default pytorch geometric ordering, where the two
        directions of a single edge occur in pairs.
        Eg. data.edge_index = tensor([[0, 1, 1, 2, 2, 3],
                                     [1, 0, 2, 1, 3, 2]])
        :param masked_atom_indices: If None, then randomly samples num_atoms
        * mask rate number of atom indices
        Otherwise a list of atom idx that sets the atoms to be masked (for
        debugging only)
        :return: the (mutated) data object, with new attributes:
        data.mask_node_label, data.masked_atom_indices and, if mask_edge,
        data.mask_edge_label and data.connected_edge_indices
        """
        if masked_atom_indices is None:
            # sample distinct atoms to be masked: either a fixed count
            # (mask_num) or mask_rate * num_atoms, but always at least 1
            num_atoms = data.x.size()[0]
            if self.mask_num == 0:
                sample_size = int(num_atoms * self.mask_rate + 1)
            else:
                sample_size = self.mask_num
            masked_atom_indices = random.sample(range(num_atoms), sample_size)

        # record the original features of the masked atoms as labels
        # (torch.cat copies, so the labels survive the overwrite below)
        mask_node_labels_list = [data.x[atom_idx].view(1, -1)
                                 for atom_idx in masked_atom_indices]
        data.mask_node_label = torch.cat(mask_node_labels_list, dim=0)
        data.masked_atom_indices = torch.tensor(masked_atom_indices)

        # overwrite the masked atoms' features with the mask token
        for atom_idx in masked_atom_indices:
            data.x[atom_idx] = torch.tensor([self.num_atom_type, 0])

        if self.mask_edge:
            # find all (directed) bonds that touch a masked atom; the set is
            # precomputed so the scan over bonds is O(E) instead of O(E * M)
            masked_atom_set = set(masked_atom_indices)
            connected_edge_indices = []
            for bond_idx, (u, v) in enumerate(data.edge_index.cpu().numpy().T):
                # each bond_idx is visited exactly once, so no dedup needed
                if u in masked_atom_set or v in masked_atom_set:
                    connected_edge_indices.append(bond_idx)

            if len(connected_edge_indices) > 0:
                # record the original bond features as labels; every 2nd
                # index suffices because the two directions of a single edge
                # occur in consecutive pairs
                mask_edge_labels_list = [data.edge_attr[bond_idx].view(1, -1)
                                         for bond_idx in connected_edge_indices[::2]]
                data.mask_edge_label = torch.cat(mask_edge_labels_list, dim=0)

                # overwrite the masked bonds' features with the mask token
                # (both directions of each bond)
                for bond_idx in connected_edge_indices:
                    data.edge_attr[bond_idx] = torch.tensor(
                        [self.num_edge_type, 0])
                data.connected_edge_indices = torch.tensor(
                    connected_edge_indices[::2])
            else:
                # no bond touches a masked atom: emit empty label tensors
                data.mask_edge_label = torch.empty((0, 2)).to(torch.int64)
                data.connected_edge_indices = torch.tensor(
                    connected_edge_indices).to(torch.int64)

        return data

    def __repr__(self):
        if self.mask_num == 0:
            return '{}(num_atom_type={}, num_edge_type={}, mask_rate={}, mask_edge={})'.format(
                self.__class__.__name__, self.num_atom_type, self.num_edge_type,
                self.mask_rate, self.mask_edge)
        else:
            return '{}(num_atom_type={}, num_edge_type={}, mask_num={}, mask_edge={})'.format(
                self.__class__.__name__, self.num_atom_type, self.num_edge_type,
                self.mask_num, self.mask_edge)
if __name__ == "__main__":
    # Smoke test: apply the NegativeEdge transform to the first molecule of
    # the tox21 dataset (requires the processed dataset to exist on disk).
    transform = NegativeEdge()
    dataset = MoleculeDataset("dataset/tox21", dataset="tox21")
transform(dataset[0]) | 11,865 | 41.378571 | 100 | py |
GraphLoG | GraphLoG-main/loader.py | import os
import torch
import pickle
import collections
import math
import pandas as pd
import numpy as np
import networkx as nx
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit import DataStructs
from rdkit.Chem.rdMolDescriptors import GetMorganFingerprintAsBitVect
from torch.utils import data
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Batch
from itertools import repeat, product, chain
# allowable node and edge features
# Atom/bond attributes are stored throughout this module as integer indices
# into these lists, so the ordering of each list defines the encoding.
allowable_features = {
    # atomic numbers 1 (H) .. 118
    'possible_atomic_num_list' : list(range(1, 119)),
    'possible_formal_charge_list' : [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],
    'possible_chirality_list' : [
        Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
        Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
        Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
        Chem.rdchem.ChiralType.CHI_OTHER
    ],
    'possible_hybridization_list' : [
        Chem.rdchem.HybridizationType.S,
        Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
        Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,
        Chem.rdchem.HybridizationType.SP3D2, Chem.rdchem.HybridizationType.UNSPECIFIED
    ],
    'possible_numH_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8],
    'possible_implicit_valence_list' : [0, 1, 2, 3, 4, 5, 6],
    'possible_degree_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    'possible_bonds' : [
        Chem.rdchem.BondType.SINGLE,
        Chem.rdchem.BondType.DOUBLE,
        Chem.rdchem.BondType.TRIPLE,
        Chem.rdchem.BondType.AROMATIC
    ],
    'possible_bond_dirs' : [ # only for double bond stereo information
        Chem.rdchem.BondDir.NONE,
        Chem.rdchem.BondDir.ENDUPRIGHT,
        Chem.rdchem.BondDir.ENDDOWNRIGHT
    ]
}
def mol_to_graph_data_obj_simple(mol):
    """
    Converts rdkit mol object to graph Data object required by the pytorch
    geometric package. NB: Uses simplified atom and bond features, and represent
    as indices
    :param mol: rdkit mol object
    :return: graph data object with the attributes: x, edge_index, edge_attr
    """
    # atoms: each atom becomes a [atomic-num index, chirality index] row
    atom_rows = []
    for atom in mol.GetAtoms():
        atom_rows.append([
            allowable_features['possible_atomic_num_list'].index(atom.GetAtomicNum()),
            allowable_features['possible_chirality_list'].index(atom.GetChiralTag()),
        ])
    x = torch.tensor(np.array(atom_rows), dtype=torch.long)

    # bonds: every bond contributes both directions with identical features
    num_bond_features = 2  # bond type, bond direction
    if len(mol.GetBonds()) > 0:  # mol has bonds
        pair_list = []
        feat_list = []
        for bond in mol.GetBonds():
            begin, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
            feat = [
                allowable_features['possible_bonds'].index(bond.GetBondType()),
                allowable_features['possible_bond_dirs'].index(bond.GetBondDir()),
            ]
            pair_list.extend([(begin, end), (end, begin)])
            feat_list.extend([feat, feat])
        # Graph connectivity in COO format with shape [2, num_edges]
        edge_index = torch.tensor(np.array(pair_list).T, dtype=torch.long)
        # Edge feature matrix with shape [num_edges, num_edge_features]
        edge_attr = torch.tensor(np.array(feat_list), dtype=torch.long)
    else:  # mol has no bonds
        edge_index = torch.empty((2, 0), dtype=torch.long)
        edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)

    return Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
def graph_data_obj_to_mol_simple(data_x, data_edge_index, data_edge_attr):
    """
    Convert pytorch geometric data obj to rdkit mol object. NB: Uses simplified
    atom and bond features, and represent as indices.
    :param data_x: node feature tensor of [atomic-num index, chirality index]
    rows
    :param data_edge_index: [2, num_edges] COO connectivity tensor; the two
    directions of each bond are assumed to occur in consecutive pairs
    :param data_edge_attr: [num_edges, 2] tensor of [bond-type index,
    bond-dir index] rows
    :return: rdkit RWMol object (not sanitized; see note at the bottom)
    """
    mol = Chem.RWMol()

    # atoms: decode each feature row back through allowable_features
    atom_features = data_x.cpu().numpy()
    num_atoms = atom_features.shape[0]
    for i in range(num_atoms):
        atomic_num_idx, chirality_tag_idx = atom_features[i]
        atomic_num = allowable_features['possible_atomic_num_list'][atomic_num_idx]
        chirality_tag = allowable_features['possible_chirality_list'][chirality_tag_idx]
        atom = Chem.Atom(atomic_num)
        atom.SetChiralTag(chirality_tag)
        mol.AddAtom(atom)

    # bonds: step 2 to take only one direction of each edge pair
    edge_index = data_edge_index.cpu().numpy()
    edge_attr = data_edge_attr.cpu().numpy()
    num_bonds = edge_index.shape[1]
    for j in range(0, num_bonds, 2):
        begin_idx = int(edge_index[0, j])
        end_idx = int(edge_index[1, j])
        bond_type_idx, bond_dir_idx = edge_attr[j]
        bond_type = allowable_features['possible_bonds'][bond_type_idx]
        bond_dir = allowable_features['possible_bond_dirs'][bond_dir_idx]
        mol.AddBond(begin_idx, end_idx, bond_type)
        # set bond direction on the freshly created bond
        new_bond = mol.GetBondBetweenAtoms(begin_idx, end_idx)
        new_bond.SetBondDir(bond_dir)

    # Chem.SanitizeMol(mol) # fails for COC1=CC2=C(NC(=N2)[S@@](=O)CC2=NC=C(
    # C)C(OC)=C2C)C=C1, when aromatic bond is possible
    # when we do not have aromatic bonds
    # Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)

    return mol
def graph_data_obj_to_nx_simple(data):
    """
    Converts graph Data object required by the pytorch geometric package to
    network x data object. NB: Uses simplified atom and bond features,
    and represent as indices. NB: possible issues with recapitulating relative
    stereochemistry since the edges in the nx object are unordered.
    :param data: pytorch geometric Data object
    :return: network x object
    """
    G = nx.Graph()

    # nodes: one per atom, carrying the two categorical feature indices
    atom_features = data.x.cpu().numpy()
    for node_idx, (atom_num_idx, chirality_tag_idx) in enumerate(atom_features):
        G.add_node(node_idx, atom_num_idx=atom_num_idx,
                   chirality_tag_idx=chirality_tag_idx)

    # edges: PyG stores each bond twice (both directions), so step by 2
    edge_index = data.edge_index.cpu().numpy()
    edge_attr = data.edge_attr.cpu().numpy()
    for col in range(0, edge_index.shape[1], 2):
        begin_idx = int(edge_index[0, col])
        end_idx = int(edge_index[1, col])
        bond_type_idx, bond_dir_idx = edge_attr[col]
        if not G.has_edge(begin_idx, end_idx):
            G.add_edge(begin_idx, end_idx, bond_type_idx=bond_type_idx,
                       bond_dir_idx=bond_dir_idx)

    return G
def nx_to_graph_data_obj_simple(G):
    """
    Converts nx graph to pytorch geometric Data object. Assume node indices
    are numbered from 0 to num_nodes - 1. NB: Uses simplified atom and bond
    features, and represent as indices. NB: possible issues with
    recapitulating relative stereochemistry since the edges in the nx
    object are unordered.
    :param G: nx graph obj
    :return: pytorch geometric Data object
    """
    # nodes -> feature matrix of [atomic-num index, chirality index] rows
    node_rows = [[attrs['atom_num_idx'], attrs['chirality_tag_idx']]
                 for _, attrs in G.nodes(data=True)]
    x = torch.tensor(np.array(node_rows), dtype=torch.long)

    # edges -> COO connectivity plus per-edge features; each undirected nx
    # edge is emitted in both directions with identical features
    num_bond_features = 2  # bond type, bond direction
    if len(G.edges()) > 0:  # mol has bonds
        pair_list = []
        feat_list = []
        for u, v, attrs in G.edges(data=True):
            feat = [attrs['bond_type_idx'], attrs['bond_dir_idx']]
            pair_list.extend([(u, v), (v, u)])
            feat_list.extend([feat, feat])
        # Graph connectivity in COO format with shape [2, num_edges]
        edge_index = torch.tensor(np.array(pair_list).T, dtype=torch.long)
        # Edge feature matrix with shape [num_edges, num_edge_features]
        edge_attr = torch.tensor(np.array(feat_list), dtype=torch.long)
    else:  # mol has no bonds
        edge_index = torch.empty((2, 0), dtype=torch.long)
        edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)

    return Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
def get_gasteiger_partial_charges(mol, n_iter=12):
    """
    Calculates list of gasteiger partial charges for each atom in mol object.
    :param mol: rdkit mol object
    :param n_iter: number of iterations. Default 12
    :return: list of computed partial charges for each atom.
    """
    Chem.rdPartialCharges.ComputeGasteigerCharges(
        mol, nIter=n_iter, throwOnParamFailure=True)
    # charges are exposed as the string property '_GasteigerCharge'
    return [float(atom.GetProp('_GasteigerCharge')) for atom in mol.GetAtoms()]
def create_standardized_mol_id(smiles):
    """
    Create a canonical InChI identifier for a SMILES string.
    Stereochemistry is stripped first; for multi-species SMILES (containing
    '.') only the largest species is used.
    :param smiles: SMILES string
    :return: InChI string, or None if the SMILES is invalid or fails to
    parse after standardization
    """
    if not check_smiles_validity(smiles):
        return None
    # remove stereochemistry
    smiles = AllChem.MolToSmiles(AllChem.MolFromSmiles(smiles),
                                 isomericSmiles=False)
    mol = AllChem.MolFromSmiles(smiles)
    if mol is None:
        # to catch weird issue with O=C1O[al]2oc(=O)c3ccc(cn3)c3ccccc3c3cccc(c3)c3ccccc3c3cc(C(F)(F)F)c(cc3o2)-c2ccccc2-c2cccc(c2)-c2ccccc2-c2cccnc21
        return None
    if '.' in smiles:  # if multiple species, pick largest molecule
        mol_species_list = split_rdkit_mol_obj(mol)
        mol = get_largest_mol(mol_species_list)
    return AllChem.MolToInchi(mol)
class MoleculeDataset(InMemoryDataset):
    def __init__(self,
                 root,
                 #data = None,
                 #slices = None,
                 transform=None,
                 pre_transform=None,
                 pre_filter=None,
                 dataset='zinc250k',
                 empty=False):
        """
        Adapted from qm9.py. Disabled the download functionality
        :param root: directory of the dataset, containing a raw and processed
        dir. The raw dir should contain the file containing the smiles, and the
        processed dir can either empty or a previously processed file
        :param transform: per-access transform passed through to InMemoryDataset
        :param pre_transform: one-off transform applied before saving
        :param pre_filter: one-off filter applied before saving
        :param dataset: name of the dataset. Currently only implemented for
        zinc250k, chembl_with_labels, tox21, hiv, bace, bbbp, clintox, esol,
        freesolv, lipophilicity, muv, pcba, sider, toxcast
        :param empty: if True, then will not load any data obj. For
        initializing empty dataset
        """
        self.dataset = dataset
        self.root = root

        super(MoleculeDataset, self).__init__(root, transform, pre_transform,
                                              pre_filter)
        # NOTE(review): re-assignment after super().__init__ looks redundant;
        # presumably it ensures the exact objects passed in are kept -- confirm
        self.transform, self.pre_transform, self.pre_filter = transform, pre_transform, pre_filter

        if not empty:
            # load the collated (data, slices) pair produced by process()
            self.data, self.slices = torch.load(self.processed_paths[0])
    def get(self, idx):
        """
        Reconstruct the idx-th Data object from the collated in-memory storage.
        :param idx: integer index of the example
        :return: a Data object with every attribute sliced out of the
        collated tensors
        """
        data = Data()
        for key in self.data.keys:
            item, slices = self.data[key], self.slices[key]
            # build a full-dimensional slice, then narrow only the
            # concatenation dimension reported by cat_dim for this key
            s = list(repeat(slice(None), item.dim()))
            s[data.cat_dim(key, item)] = slice(slices[idx],
                                               slices[idx + 1])
            data[key] = item[s]
        return data
    @property
    def raw_file_names(self):
        """Return every file name found in the raw directory."""
        file_name_list = os.listdir(self.raw_dir)
        # assert len(file_name_list) == 1   # currently assume we have a
        # single raw file
        return file_name_list
    @property
    def processed_file_names(self):
        """Name of the single processed file written/read by this dataset."""
        return 'geometric_data_processed.pt'
    def download(self):
        """Downloading is intentionally disabled; raw data must be provided locally."""
        raise NotImplementedError('Must indicate valid location of raw data. '
                                  'No download allowed')
def process(self):
data_smiles_list = []
data_list = []
if self.dataset == 'zinc_standard_agent':
input_path = self.raw_paths[0]
input_df = pd.read_csv(input_path, sep=',', compression='gzip',
dtype='str')
smiles_list = list(input_df['smiles'])
zinc_id_list = list(input_df['zinc_id'])
for i in range(len(smiles_list)):
print(i)
s = smiles_list[i]
# each example contains a single species
try:
rdkit_mol = AllChem.MolFromSmiles(s)
if rdkit_mol != None: # ignore invalid mol objects
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
id = int(zinc_id_list[i].split('ZINC')[1].lstrip('0'))
data.id = torch.tensor(
[id]) # id here is zinc id value, stripped of
# leading zeros
data_list.append(data)
data_smiles_list.append(smiles_list[i])
except:
continue
elif self.dataset == 'chembl_filtered':
### get downstream test molecules.
from splitters import scaffold_split
###
downstream_dir = [
'dataset/bace',
'dataset/bbbp',
'dataset/clintox',
'dataset/esol',
'dataset/freesolv',
'dataset/hiv',
'dataset/lipophilicity',
'dataset/muv',
# 'dataset/pcba/processed/smiles.csv',
'dataset/sider',
'dataset/tox21',
'dataset/toxcast'
]
downstream_inchi_set = set()
for d_path in downstream_dir:
print(d_path)
dataset_name = d_path.split('/')[1]
downstream_dataset = MoleculeDataset(d_path, dataset=dataset_name)
downstream_smiles = pd.read_csv(os.path.join(d_path,
'processed', 'smiles.csv'),
header=None)[0].tolist()
assert len(downstream_dataset) == len(downstream_smiles)
_, _, _, (train_smiles, valid_smiles, test_smiles) = scaffold_split(downstream_dataset, downstream_smiles, task_idx=None, null_value=0,
frac_train=0.8,frac_valid=0.1, frac_test=0.1,
return_smiles=True)
### remove both test and validation molecules
remove_smiles = test_smiles + valid_smiles
downstream_inchis = []
for smiles in remove_smiles:
species_list = smiles.split('.')
for s in species_list: # record inchi for all species, not just
# largest (by default in create_standardized_mol_id if input has
# multiple species)
inchi = create_standardized_mol_id(s)
downstream_inchis.append(inchi)
downstream_inchi_set.update(downstream_inchis)
smiles_list, rdkit_mol_objs, folds, labels = \
_load_chembl_with_labels_dataset(os.path.join(self.root, 'raw'))
print('processing')
for i in range(len(rdkit_mol_objs)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
if rdkit_mol != None:
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
mw = Descriptors.MolWt(rdkit_mol)
if 50 <= mw <= 900:
inchi = create_standardized_mol_id(smiles_list[i])
if inchi != None and inchi not in downstream_inchi_set:
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
# fold information
if i in folds[0]:
data.fold = torch.tensor([0])
elif i in folds[1]:
data.fold = torch.tensor([1])
else:
data.fold = torch.tensor([2])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'tox21':
smiles_list, rdkit_mol_objs, labels = \
_load_tox21_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
## convert aromatic bonds to double bonds
#Chem.SanitizeMol(rdkit_mol,
#sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'hiv':
smiles_list, rdkit_mol_objs, labels = \
_load_hiv_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor([labels[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'bace':
smiles_list, rdkit_mol_objs, folds, labels = \
_load_bace_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor([labels[i]])
data.fold = torch.tensor([folds[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'bbbp':
smiles_list, rdkit_mol_objs, labels = \
_load_bbbp_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
if rdkit_mol != None:
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor([labels[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'clintox':
smiles_list, rdkit_mol_objs, labels = \
_load_clintox_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
if rdkit_mol != None:
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'esol':
smiles_list, rdkit_mol_objs, labels = \
_load_esol_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor([labels[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'freesolv':
smiles_list, rdkit_mol_objs, labels = \
_load_freesolv_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor([labels[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'lipophilicity':
smiles_list, rdkit_mol_objs, labels = \
_load_lipophilicity_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor([labels[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'muv':
smiles_list, rdkit_mol_objs, labels = \
_load_muv_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'pcba':
smiles_list, rdkit_mol_objs, labels = \
_load_pcba_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'pcba_pretrain':
smiles_list, rdkit_mol_objs, labels = \
_load_pcba_dataset(self.raw_paths[0])
downstream_inchi = set(pd.read_csv(os.path.join(self.root,
'downstream_mol_inchi_may_24_2019'),
sep=',', header=None)[0])
for i in range(len(smiles_list)):
print(i)
if '.' not in smiles_list[i]: # remove examples with
# multiples species
rdkit_mol = rdkit_mol_objs[i]
mw = Descriptors.MolWt(rdkit_mol)
if 50 <= mw <= 900:
inchi = create_standardized_mol_id(smiles_list[i])
if inchi != None and inchi not in downstream_inchi:
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
# elif self.dataset == ''
elif self.dataset == 'sider':
smiles_list, rdkit_mol_objs, labels = \
_load_sider_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'toxcast':
smiles_list, rdkit_mol_objs, labels = \
_load_toxcast_dataset(self.raw_paths[0])
for i in range(len(smiles_list)):
print(i)
rdkit_mol = rdkit_mol_objs[i]
if rdkit_mol != None:
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i]) # id here is the index of the mol in
# the dataset
data.y = torch.tensor(labels[i, :])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'ptc_mr':
input_path = self.raw_paths[0]
input_df = pd.read_csv(input_path, sep=',', header=None, names=['id', 'label', 'smiles'])
smiles_list = input_df['smiles']
labels = input_df['label'].values
for i in range(len(smiles_list)):
print(i)
s = smiles_list[i]
rdkit_mol = AllChem.MolFromSmiles(s)
if rdkit_mol != None: # ignore invalid mol objects
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i])
data.y = torch.tensor([labels[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
elif self.dataset == 'mutag':
smiles_path = os.path.join(self.root, 'raw', 'mutag_188_data.can')
# smiles_path = 'dataset/mutag/raw/mutag_188_data.can'
labels_path = os.path.join(self.root, 'raw', 'mutag_188_target.txt')
# labels_path = 'dataset/mutag/raw/mutag_188_target.txt'
smiles_list = pd.read_csv(smiles_path, sep=' ', header=None)[0]
labels = pd.read_csv(labels_path, header=None)[0].values
for i in range(len(smiles_list)):
print(i)
s = smiles_list[i]
rdkit_mol = AllChem.MolFromSmiles(s)
if rdkit_mol != None: # ignore invalid mol objects
# # convert aromatic bonds to double bonds
# Chem.SanitizeMol(rdkit_mol,
# sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
data = mol_to_graph_data_obj_simple(rdkit_mol)
# manually add mol id
data.id = torch.tensor(
[i])
data.y = torch.tensor([labels[i]])
data_list.append(data)
data_smiles_list.append(smiles_list[i])
else:
raise ValueError('Invalid dataset name')
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
# write data_smiles_list in processed paths
data_smiles_series = pd.Series(data_smiles_list)
data_smiles_series.to_csv(os.path.join(self.processed_dir,
'smiles.csv'), index=False,
header=False)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
# NB: only properly tested when dataset_1 is chembl_with_labels and dataset_2
# is pcba_pretrain
def merge_dataset_objs(dataset_1, dataset_2):
    """Naively merge two molecule dataset objects, ignoring molecule identity.

    Both datasets are assumed to carry multi-task y labels. Label vectors are
    zero-padded so every merged example has dimension dim(y_1) + dim(y_2):
    examples from dataset_1 get zeros in the trailing dim(y_2) columns,
    examples from dataset_2 get zeros in the leading dim(y_1) columns.

    :return: pytorch geometric dataset obj exposing only the x, edge_attr,
        edge_index and padded y attributes
    """
    dim_1 = dataset_1[0].y.size()[0]
    dim_2 = dataset_2[0].y.size()[0]

    merged = []
    # dataset_1 examples: zeros appended after the original labels
    for d in dataset_1:
        padded_y = torch.cat([d.y, torch.zeros(dim_2, dtype=torch.long)])
        merged.append(Data(x=d.x, edge_index=d.edge_index,
                           edge_attr=d.edge_attr, y=padded_y))
    # dataset_2 examples: zeros prepended before the original labels
    for d in dataset_2:
        padded_y = torch.cat([torch.zeros(dim_1, dtype=torch.long),
                              d.y.long()])
        merged.append(Data(x=d.x, edge_index=d.edge_index,
                           edge_attr=d.edge_attr, y=padded_y))

    # build an 'empty' dataset shell from an already-processed root, then
    # collate the merged data into it manually
    new_dataset = MoleculeDataset(root='dataset/chembl_with_labels',
                                  dataset='chembl_with_labels', empty=True)
    new_dataset.data, new_dataset.slices = new_dataset.collate(merged)
    return new_dataset
def create_circular_fingerprint(mol, radius, size, chirality):
    """Compute the folded Morgan (circular) fingerprint of one molecule.

    :param mol: rdkit mol object
    :param radius: radius of the circular fingerprint
    :param size: length of the folded bit vector
    :param chirality: whether chirality is encoded in the fingerprint
    :return: np array of morgan fingerprint
    """
    bit_vect = GetMorganFingerprintAsBitVect(
        mol, radius, nBits=size, useChirality=chirality)
    return np.array(bit_vect)
class MoleculeFingerprintDataset(data.Dataset):
    """Dataset of precomputed circular (Morgan) fingerprints.

    Each item is a dict with keys 'fp_arr' (fingerprint tensor), 'id'
    (index of the molecule in the raw dataset), 'y' (labels) and, for
    chembl_with_labels only, 'fold' (precomputed split index).
    """
    def __init__(self, root, dataset, radius, size, chirality=True):
        """
        Create dataset object containing list of dicts, where each dict
        contains the circular fingerprint of the molecule, label, id,
        and possibly precomputed fold information
        :param root: directory of the dataset, containing a raw and
        processed_fp dir. The raw dir should contain the file containing the
        smiles, and the processed_fp dir can either be empty or a
        previously processed file
        :param dataset: name of dataset. Currently only implemented for
        tox21, hiv, chembl_with_labels
        :param radius: radius of the circular fingerprints
        :param size: size of the folded fingerprint vector
        :param chirality: if True, fingerprint includes chirality information
        """
        self.dataset = dataset
        self.root = root
        self.radius = radius
        self.size = size
        self.chirality = chirality
        # loads from processed_fp if present, otherwise processes raw data
        self._load()
    def _process(self):
        """Fingerprint every molecule in the raw data and pickle the results.

        Writes root/processed_fp/smiles.csv and
        root/processed_fp/fingerprint_data_processed.pkl.
        """
        data_smiles_list = []
        data_list = []
        if self.dataset == 'chembl_with_labels':
            smiles_list, rdkit_mol_objs, folds, labels = \
                _load_chembl_with_labels_dataset(os.path.join(self.root, 'raw'))
            print('processing')
            for i in range(len(rdkit_mol_objs)):
                print(i)
                rdkit_mol = rdkit_mol_objs[i]
                # None entries correspond to invalid molecules; skip them
                if rdkit_mol != None:
                    # # convert aromatic bonds to double bonds
                    fp_arr = create_circular_fingerprint(rdkit_mol,
                                                         self.radius,
                                                         self.size, self.chirality)
                    fp_arr = torch.tensor(fp_arr)
                    # manually add mol id
                    id = torch.tensor([i])  # id here is the index of the mol in
                    # the dataset
                    y = torch.tensor(labels[i, :])
                    # fold information
                    if i in folds[0]:
                        fold = torch.tensor([0])
                    elif i in folds[1]:
                        fold = torch.tensor([1])
                    else:
                        fold = torch.tensor([2])
                    data_list.append({'fp_arr': fp_arr, 'id': id, 'y': y,
                                      'fold': fold})
                    data_smiles_list.append(smiles_list[i])
        elif self.dataset == 'tox21':
            smiles_list, rdkit_mol_objs, labels = \
                _load_tox21_dataset(os.path.join(self.root, 'raw/tox21.csv'))
            print('processing')
            for i in range(len(smiles_list)):
                print(i)
                rdkit_mol = rdkit_mol_objs[i]
                ## convert aromatic bonds to double bonds
                fp_arr = create_circular_fingerprint(rdkit_mol,
                                                     self.radius,
                                                     self.size,
                                                     self.chirality)
                fp_arr = torch.tensor(fp_arr)
                # manually add mol id
                id = torch.tensor([i])  # id here is the index of the mol in
                # the dataset
                y = torch.tensor(labels[i, :])
                data_list.append({'fp_arr': fp_arr, 'id': id, 'y': y})
                data_smiles_list.append(smiles_list[i])
        elif self.dataset == 'hiv':
            smiles_list, rdkit_mol_objs, labels = \
                _load_hiv_dataset(os.path.join(self.root, 'raw/HIV.csv'))
            print('processing')
            for i in range(len(smiles_list)):
                print(i)
                rdkit_mol = rdkit_mol_objs[i]
                # # convert aromatic bonds to double bonds
                fp_arr = create_circular_fingerprint(rdkit_mol,
                                                     self.radius,
                                                     self.size,
                                                     self.chirality)
                fp_arr = torch.tensor(fp_arr)
                # manually add mol id
                id = torch.tensor([i])  # id here is the index of the mol in
                # the dataset
                y = torch.tensor([labels[i]])
                data_list.append({'fp_arr': fp_arr, 'id': id, 'y': y})
                data_smiles_list.append(smiles_list[i])
        else:
            raise ValueError('Invalid dataset name')
        # save processed data objects and smiles
        processed_dir = os.path.join(self.root, 'processed_fp')
        data_smiles_series = pd.Series(data_smiles_list)
        data_smiles_series.to_csv(os.path.join(processed_dir, 'smiles.csv'),
                                  index=False,
                                  header=False)
        with open(os.path.join(processed_dir,
                               'fingerprint_data_processed.pkl'),
                  'wb') as f:
            pickle.dump(data_list, f)
    def _load(self):
        """Load processed fingerprints from disk, processing raw data first
        if the pickle does not exist yet."""
        processed_dir = os.path.join(self.root, 'processed_fp')
        # check if saved file exist. If so, then load from save
        file_name_list = os.listdir(processed_dir)
        if 'fingerprint_data_processed.pkl' in file_name_list:
            with open(os.path.join(processed_dir,
                                   'fingerprint_data_processed.pkl'),
                      'rb') as f:
                self.data_list = pickle.load(f)
        # if no saved file exist, then perform processing steps, save then
        # reload
        else:
            self._process()
            self._load()
    def __len__(self):
        """Return the number of molecules in the dataset."""
        return len(self.data_list)
    def __getitem__(self, index):
        """Return one data dict, or a new sub-dataset if *index* is iterable."""
        ## if iterable class is passed, return dataset objection
        if hasattr(index, "__iter__"):
            dataset = MoleculeFingerprintDataset(self.root, self.dataset, self.radius, self.size, chirality=self.chirality)
            dataset.data_list = [self.data_list[i] for i in index]
            return dataset
        else:
            return self.data_list[index]
def _load_tox21_dataset(input_path):
    """Load the Tox21 csv file.

    :param input_path: path to tox21 csv
    :return: list of smiles, list of rdkit mol obj, np.array containing the
        labels (1 positive, -1 negative, 0 missing)
    """
    df = pd.read_csv(input_path, sep=',')
    smiles_list = df['smiles']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    tasks = ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
             'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53']
    # recode: 0 -> -1 (negatives), then NaN -> 0 (missing labels)
    labels = df[tasks].replace(0, -1).fillna(0)
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    return smiles_list, rdkit_mol_objs_list, labels.values
def _load_hiv_dataset(input_path):
    """Load the HIV csv file.

    :param input_path: path to HIV csv
    :return: list of smiles, list of rdkit mol obj, np.array containing the
        labels (1 active, -1 inactive; no missing values)
    """
    df = pd.read_csv(input_path, sep=',')
    smiles_list = df['smiles']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    # recode negatives: 0 -> -1 (this column has no NaNs)
    labels = df['HIV_active'].replace(0, -1)
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    return smiles_list, rdkit_mol_objs_list, labels.values
def _load_bace_dataset(input_path):
    """Load the BACE csv file.

    :param input_path: path to bace csv
    :return: list of smiles, list of rdkit mol obj, np.array of fold indices
        (0 train / 1 valid / 2 test), np.array containing the labels
        (1 positive, -1 negative; no missing values)
    """
    df = pd.read_csv(input_path, sep=',')
    smiles_list = df['mol']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    # recode negatives: 0 -> -1 (this column has no NaNs)
    labels = df['Class'].replace(0, -1)
    # precomputed split column: Train -> 0, Valid -> 1, Test -> 2
    folds = (df['Model']
             .replace('Train', 0)
             .replace('Valid', 1)
             .replace('Test', 2))
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    assert len(smiles_list) == len(folds)
    return smiles_list, rdkit_mol_objs_list, folds.values, labels.values
def _load_bbbp_dataset(input_path):
    """Load the BBBP csv file.

    :param input_path: path to BBBP csv
    :return: list of canonical smiles (None for unparseable entries), list of
        rdkit mol obj (None for unparseable entries), np.array containing the
        labels (1 positive, -1 negative; no missing values)
    """
    input_df = pd.read_csv(input_path, sep=',')
    smiles_list = input_df['smiles']
    # invalid smiles parse to None; keep the None placeholders so list
    # indices stay aligned with the label rows (callers skip None mols).
    # NOTE: the original `[m if m != None else None ...]` pass was a no-op
    # identity comprehension and has been removed.
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    # canonical smiles for valid molecules, None otherwise
    preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None
                                else None for m in rdkit_mol_objs_list]
    labels = input_df['p_np']
    # convert 0 to -1; there are no nans in this column
    labels = labels.replace(0, -1)
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(preprocessed_smiles_list)
    assert len(smiles_list) == len(labels)
    return preprocessed_smiles_list, rdkit_mol_objs_list, labels.values
def _load_clintox_dataset(input_path):
    """Load the ClinTox csv file.

    :param input_path: path to clintox csv
    :return: list of canonical smiles (None for unparseable entries), list of
        rdkit mol obj (None for unparseable entries), np.array containing the
        labels (1 positive, -1 negative; no missing values)
    """
    input_df = pd.read_csv(input_path, sep=',')
    smiles_list = input_df['smiles']
    # invalid smiles parse to None; keep the None placeholders so list
    # indices stay aligned with the label rows (callers skip None mols).
    # NOTE: the original `[m if m != None else None ...]` pass was a no-op
    # identity comprehension and has been removed.
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    # canonical smiles for valid molecules, None otherwise
    preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None
                                else None for m in rdkit_mol_objs_list]
    tasks = ['FDA_APPROVED', 'CT_TOX']
    labels = input_df[tasks]
    # convert 0 to -1; there are no nans
    labels = labels.replace(0, -1)
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(preprocessed_smiles_list)
    assert len(smiles_list) == len(labels)
    return preprocessed_smiles_list, rdkit_mol_objs_list, labels.values
# input_path = 'dataset/clintox/raw/clintox.csv'
# smiles_list, rdkit_mol_objs_list, labels = _load_clintox_dataset(input_path)
def _load_esol_dataset(input_path):
    """Load the ESOL (Delaney) csv file.

    NB: some examples contain multiple species.

    :param input_path: path to the delaney-processed csv
    :return: list of smiles, list of rdkit mol obj, np.array containing the
        regression targets
    """
    df = pd.read_csv(input_path, sep=',')
    smiles_list = df['smiles']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    labels = df['measured log solubility in mols per litre']
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    return smiles_list, rdkit_mol_objs_list, labels.values
# input_path = 'dataset/esol/raw/delaney-processed.csv'
# smiles_list, rdkit_mol_objs_list, labels = _load_esol_dataset(input_path)
def _load_freesolv_dataset(input_path):
    """Load the FreeSolv csv file.

    :param input_path: path to freesolv csv
    :return: list of smiles, list of rdkit mol obj, np.array containing the
        regression targets (experimental hydration free energy, 'expt')
    """
    df = pd.read_csv(input_path, sep=',')
    smiles_list = df['smiles']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    labels = df['expt']
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    return smiles_list, rdkit_mol_objs_list, labels.values
def _load_lipophilicity_dataset(input_path):
    """Load the Lipophilicity csv file.

    :param input_path: path to lipophilicity csv
    :return: list of smiles, list of rdkit mol obj, np.array containing the
        regression targets ('exp' column)
    """
    df = pd.read_csv(input_path, sep=',')
    smiles_list = df['smiles']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    labels = df['exp']
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    return smiles_list, rdkit_mol_objs_list, labels.values
def _load_muv_dataset(input_path):
    """Load the MUV csv file.

    :param input_path: path to muv csv
    :return: list of smiles, list of rdkit mol obj, np.array containing the
        labels (1 positive, -1 negative, 0 missing)
    """
    df = pd.read_csv(input_path, sep=',')
    smiles_list = df['smiles']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    tasks = ['MUV-466', 'MUV-548', 'MUV-600', 'MUV-644', 'MUV-652', 'MUV-689',
             'MUV-692', 'MUV-712', 'MUV-713', 'MUV-733', 'MUV-737', 'MUV-810',
             'MUV-832', 'MUV-846', 'MUV-852', 'MUV-858', 'MUV-859']
    # recode: 0 -> -1 (negatives), then NaN -> 0 (missing labels)
    labels = df[tasks].replace(0, -1).fillna(0)
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    return smiles_list, rdkit_mol_objs_list, labels.values
def _load_sider_dataset(input_path):
    """Load the SIDER csv file.

    :param input_path: path to sider csv
    :return: list of smiles, list of rdkit mol obj, np.array containing the
        labels (1 positive, -1 negative)
    """
    input_df = pd.read_csv(input_path, sep=',')
    smiles_list = input_df['smiles']
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    tasks = ['Hepatobiliary disorders',
             'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders',
             'Investigations', 'Musculoskeletal and connective tissue disorders',
             'Gastrointestinal disorders', 'Social circumstances',
             'Immune system disorders', 'Reproductive system and breast disorders',
             'Neoplasms benign, malignant and unspecified (incl cysts and polyps)',
             'General disorders and administration site conditions',
             'Endocrine disorders', 'Surgical and medical procedures',
             'Vascular disorders', 'Blood and lymphatic system disorders',
             'Skin and subcutaneous tissue disorders',
             'Congenital, familial and genetic disorders',
             'Infections and infestations',
             'Respiratory, thoracic and mediastinal disorders',
             'Psychiatric disorders', 'Renal and urinary disorders',
             'Pregnancy, puerperium and perinatal conditions',
             'Ear and labyrinth disorders', 'Cardiac disorders',
             'Nervous system disorders',
             'Injury, poisoning and procedural complications']
    labels = input_df[tasks]
    # convert 0 to -1
    labels = labels.replace(0, -1)
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(labels)
    # BUG FIX: was `labels.value`, which raises AttributeError on a
    # DataFrame; must be the `.values` ndarray like every other loader here.
    return smiles_list, rdkit_mol_objs_list, labels.values
def _load_toxcast_dataset(input_path):
    """Load the ToxCast csv file.

    NB: some examples have multiple species, some example smiles are invalid.

    :param input_path: path to toxcast csv
    :return: list of canonical smiles (None for unparseable entries), list of
        rdkit mol obj (None for unparseable entries), np.array containing the
        labels (1 positive, -1 negative, 0 missing)
    """
    input_df = pd.read_csv(input_path, sep=',')
    smiles_list = input_df['smiles']
    # invalid smiles parse to None; keep the None placeholders so list
    # indices stay aligned with the label rows (callers skip None mols).
    # NOTE: the original `[m if m != None else None ...]` pass was a no-op
    # identity comprehension and has been removed.
    rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    # canonical smiles for valid molecules, None otherwise
    preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None
                                else None for m in rdkit_mol_objs_list]
    # every column after 'smiles' is a task
    tasks = list(input_df.columns)[1:]
    labels = input_df[tasks]
    # convert 0 to -1; convert nan (missing label) to 0
    labels = labels.replace(0, -1)
    labels = labels.fillna(0)
    assert len(smiles_list) == len(rdkit_mol_objs_list)
    assert len(smiles_list) == len(preprocessed_smiles_list)
    assert len(smiles_list) == len(labels)
    return preprocessed_smiles_list, rdkit_mol_objs_list, labels.values
def _load_chembl_with_labels_dataset(root_path):
    """
    Data from 'Large-scale comparison of machine learning methods for drug
    target prediction on ChEMBL'
    :param root_path: path to the folder containing the reduced chembl dataset
    :return: list of smiles, preprocessed rdkit mol obj list, list of np.array
    containing indices for each of the 3 folds, np.array containing the labels
    """
    # adapted from https://github.com/ml-jku/lsc/blob/master/pythonCode/lstm/loadData.py
    # first need to download the files and unzip:
    # wget http://bioinf.jku.at/research/lsc/chembl20/dataPythonReduced.zip
    # unzip and rename to chembl_with_labels
    # wget http://bioinf.jku.at/research/lsc/chembl20/dataPythonReduced/chembl20Smiles.pckl
    # into the dataPythonReduced directory
    # wget http://bioinf.jku.at/research/lsc/chembl20/dataPythonReduced/chembl20LSTM.pckl

    # 1. load folds and labels (context managers so files are always closed;
    # the originals were opened/closed manually and leaked on error)
    with open(os.path.join(root_path, 'folds0.pckl'), 'rb') as f:
        folds = pickle.load(f)
    with open(os.path.join(root_path, 'labelsHard.pckl'), 'rb') as f:
        targetMat = pickle.load(f)
        sampleAnnInd = pickle.load(f)
        targetAnnInd = pickle.load(f)

    # sparse (molecule, target) label matrix as CSR with sorted indices.
    # (The original no-op self-assignments `targetMat=targetMat` and
    # `targetAnnInd=targetAnnInd` were removed.)
    targetMat = targetMat.copy().tocsr()
    targetMat.sort_indices()
    # shift target annotation indices to start at 0
    targetAnnInd = targetAnnInd - targetAnnInd.min()

    # keep only fold members that actually have sample annotations
    folds = [np.intersect1d(fold, sampleAnnInd.index.values).tolist()
             for fold in folds]
    # (removed dead code: targetMatTransposed / trainPosOverall /
    # trainNegOverall were computed but never used)

    # dense array containing the labels for the 456331 molecules and 1310
    # targets; possible values are {-1, 0, 1}
    denseOutputData = targetMat.A

    # 2. load structures
    with open(os.path.join(root_path, 'chembl20LSTM.pckl'), 'rb') as f:
        rdkitArr = pickle.load(f)

    assert len(rdkitArr) == denseOutputData.shape[0]
    assert len(rdkitArr) == len(folds[0]) + len(folds[1]) + len(folds[2])

    # keep only the largest species of each molecule; drop (replace with
    # None) molecules that are invalid or have <= 2 atoms so that list
    # indices stay aligned with the label rows
    preprocessed_rdkitArr = []
    print('preprocessing')
    for i in range(len(rdkitArr)):
        print(i)
        m = rdkitArr[i]
        if m is None:
            preprocessed_rdkitArr.append(None)
            continue
        mol_species_list = split_rdkit_mol_obj(m)
        if not mol_species_list:
            preprocessed_rdkitArr.append(None)
            continue
        largest_mol = get_largest_mol(mol_species_list)
        if len(largest_mol.GetAtoms()) <= 2:
            preprocessed_rdkitArr.append(None)
        else:
            preprocessed_rdkitArr.append(largest_mol)

    assert len(preprocessed_rdkitArr) == denseOutputData.shape[0]

    # canonical smiles; None for molecules dropped above
    smiles_list = [AllChem.MolToSmiles(m) if m is not None else None
                   for m in preprocessed_rdkitArr]
    assert len(preprocessed_rdkitArr) == len(smiles_list)

    return smiles_list, preprocessed_rdkitArr, folds, denseOutputData
# root_path = 'dataset/chembl_with_labels'
def check_smiles_validity(smiles):
    """Return True if *smiles* parses into a valid rdkit mol, else False.

    :param smiles: smiles string to validate
    :return: bool
    """
    try:
        # MolFromSmiles returns None for unparseable input
        return Chem.MolFromSmiles(smiles) is not None
    except Exception:
        # narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); rdkit can raise on malformed input
        return False
def split_rdkit_mol_obj(mol):
    """
    Split rdkit mol object containing multiple species or one species into a
    list of mol objects or a list containing a single object respectively
    :param mol:
    :return:
    """
    smiles = AllChem.MolToSmiles(mol, isomericSmiles=True)
    # each '.'-separated fragment is a separate species; drop invalid ones
    return [AllChem.MolFromSmiles(species)
            for species in smiles.split('.')
            if check_smiles_validity(species)]
def get_largest_mol(mol_list):
    """
    Given a list of rdkit mol objects, returns mol object containing the
    largest num of atoms. If multiple containing largest num of atoms,
    picks the first one
    :param mol_list:
    :return:
    """
    # max() returns the first element among ties, matching the original
    # index(max(...)) behaviour
    return max(mol_list, key=lambda m: len(m.GetAtoms()))
def create_all_datasets():
    """Build processed versions of every downstream dataset plus the two
    pre-training corpora (chembl_filtered, zinc_standard_agent)."""
    downstream_names = [
        'bace', 'bbbp', 'clintox', 'esol', 'freesolv', 'hiv',
        'lipophilicity', 'muv', 'sider', 'tox21', 'toxcast',
    ]
    for dataset_name in downstream_names:
        print(dataset_name)
        root = "dataset/" + dataset_name
        # instantiating MoleculeDataset triggers download/processing
        os.makedirs(root + "/processed", exist_ok=True)
        dataset = MoleculeDataset(root, dataset=dataset_name)
        print(dataset)

    dataset = MoleculeDataset(root="dataset/chembl_filtered",
                              dataset="chembl_filtered")
    print(dataset)
    dataset = MoleculeDataset(root="dataset/zinc_standard_agent",
                              dataset="zinc_standard_agent")
    print(dataset)
# test MoleculeDataset object
if __name__ == "__main__":
    # build/process every dataset when this module is run as a script
    create_all_datasets()
| 56,150 | 41.250564 | 165 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/setup.py | """Setup script for python packaging."""
import site
import sys
from setuptools import setup
# enable installing package for user
# https://github.com/pypa/pip/issues/7953#issuecomment-645133255
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
# package metadata; each console script maps a shell command onto the
# main() entry point of the corresponding imgx.run_* module
setup(
    name="imgx",
    version="0.1.0",
    description="",
    author="",
    entry_points={
        "console_scripts": [
            "imgx_train=imgx.run_train:main",
            "imgx_valid=imgx.run_valid:main",
            "imgx_test=imgx.run_test:main",
            "imgx_test_ensemble=imgx.run_test_ensemble:main",
        ],
    },
)
| 593 | 22.76 | 64 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/run_test_ensemble.py | """Script to launch ensemble on test set results."""
import argparse
import json
from collections import defaultdict
from functools import partial
from pathlib import Path
import jax
import jax.numpy as jnp
import numpy as np
import pandas as pd
import SimpleITK as sitk # noqa: N813
from absl import logging
from omegaconf import OmegaConf
from tqdm import tqdm
from imgx.datasets import (
DIR_TFDS_PROCESSED_MAP,
IMAGE_SPACING_MAP,
NUM_CLASSES_MAP,
)
from imgx.exp.eval import (
get_jit_segmentation_metrics,
get_non_jit_segmentation_metrics_per_step,
)
logging.set_verbosity(logging.INFO)
def parse_args() -> argparse.Namespace:
    """Parse command line arguments.

    Returns:
        Namespace with the parsed options (currently only ``log_dir``).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--log_dir", type=Path, default=None, help="Folder of wandb."
    )
    return parser.parse_args()
def vote_ensemble(test_dir: Path, dir_tfds: Path, num_classes: int) -> None:
    """Ensemble prediction via voting.

    For each predicted mask file (matched across seed_* sub-directories by
    relative path), takes a per-voxel majority vote over seeds and writes
    the result under test_dir/ensemble_<num_seeds>/.

    Args:
        test_dir: path having predictions.
        dir_tfds: path of tfds data, having ground truth.
        num_classes: number of classes in labels.
    """
    # get seed dirs and sort by seeds
    lst_seed_dir = sorted(
        test_dir.glob("seed_*/"), key=lambda x: int(x.stem.split("_")[-1])
    )
    num_seeds = len(lst_seed_dir)
    # map relative_path to list of full path, corresponding to seeds
    path_dict = defaultdict(list)
    for seed_dir in lst_seed_dir:
        for x in seed_dir.glob("**/*.nii.gz"):
            rel_path = x.relative_to(seed_dir)
            path_dict[rel_path].append(x)
    # vote to ensemble
    logging.info("Calculating ensemble predictions.")
    for rel_path, pred_paths in path_dict.items():
        # a list of shape (D, W, H)
        mask_preds = [
            sitk.GetArrayFromImage(sitk.ReadImage(x)) for x in pred_paths
        ]
        # (D, W, H, num_classes, num_seeds)
        mask_onehot = jax.nn.one_hot(
            jnp.stack(mask_preds, axis=-1), num_classes=num_classes, axis=-2
        )
        # majority vote per voxel: sum one-hots over seeds, take argmax
        # over classes -> (D, W, H)
        mask_pred = jnp.argmax(jnp.sum(mask_onehot, axis=-1), axis=-1).astype(
            "uint16"
        )
        # copy meta data (spacing/origin/direction) from the ground-truth
        # volume of the same uid
        uid = pred_paths[0].stem.split("_")[0]
        volume_mask_true = sitk.ReadImage(
            dir_tfds / f"{uid}_mask_preprocessed.nii.gz"
        )
        volume_mask_pred = sitk.GetImageFromArray(mask_pred)
        volume_mask_pred.CopyInformation(volume_mask_true)
        # save
        out_path = test_dir / f"ensemble_{num_seeds}" / rel_path
        out_path.parent.mkdir(parents=True, exist_ok=True)
        sitk.WriteImage(
            image=volume_mask_pred,
            fileName=out_path,
            useCompression=True,
        )
def evaluate_ensemble_prediction(
    dir_path: Path, dir_tfds: Path, num_classes: int, spacing: jnp.ndarray
) -> None:
    """Evaluate the saved predictions from ensemble.

    Writes per-sample metrics to ``metrics_per_sample.csv`` and the
    dataset-level means to ``mean_metrics.json`` under ``dir_path``.

    Args:
        dir_path: path having predictions, named ``sample_{num_steps}_steps``
            with one ``step_{i}`` sub-folder per sampling step.
        dir_tfds: path of tfds data, having ground truth.
        num_classes: number of classes in labels.
        spacing: spacing for voxels.
    """
    # number of diffusion sampling steps, parsed from "sample_{n}_steps"
    num_steps = int(dir_path.name.split("_")[1])
    uids = [
        x.name.split("_")[0] for x in (dir_path / "step_0").glob("*.nii.gz")
    ]
    lst_df_scalar = []
    for uid in tqdm(uids, total=len(uids)):
        # (D, W, H)
        mask_true = sitk.GetArrayFromImage(
            sitk.ReadImage(dir_tfds / f"{uid}_mask_preprocessed.nii.gz")
        )
        # (D, W, H, num_classes)
        mask_true = jax.nn.one_hot(mask_true, num_classes=num_classes, axis=-1)
        # (1, W, H, D, num_classes): sitk arrays are (D, W, H), metrics
        # expect (W, H, D) plus a leading batch axis
        mask_true = jnp.transpose(mask_true, axes=(2, 1, 0, 3))[None, ...]
        pred_paths = [
            dir_path / f"step_{i}" / f"{uid}_mask_pred.nii.gz"
            for i in range(num_steps)
        ]
        # a list of shape (D, W, H)
        mask_preds = [
            sitk.GetArrayFromImage(sitk.ReadImage(x)) for x in pred_paths
        ]
        # (D, W, H, num_classes, num_steps)
        mask_pred = jax.nn.one_hot(
            jnp.stack(mask_preds, axis=-1), num_classes=num_classes, axis=-2
        )
        # (1, W, H, D, num_classes, num_steps)
        mask_pred = jnp.transpose(mask_pred, axes=(2, 1, 0, 3, 4))[None, ...]
        # metrics: jit metrics are vmapped over the trailing step axis
        scalars_jit = jax.vmap(
            partial(
                get_jit_segmentation_metrics,
                mask_true=mask_true,
                spacing=spacing,
            ),
            in_axes=-1,
            out_axes=-1,
        )(mask_pred)
        scalars_nonjit = get_non_jit_segmentation_metrics_per_step(
            mask_pred=mask_pred,
            mask_true=mask_true,
            spacing=spacing,
        )
        scalars = {**scalars_jit, **scalars_nonjit}
        # flatten per step
        scalars_flatten = {}
        for k, v in scalars.items():
            for i in range(v.shape[-1]):
                scalars_flatten[f"{k}_step_{i}"] = v[..., i]
            # the bare key holds the final step's value
            scalars_flatten[k] = v[..., -1]
        scalars = scalars_flatten
        scalars = jax.tree_map(lambda x: np.asarray(x).tolist(), scalars)
        scalars["uid"] = [uid]
        lst_df_scalar.append(pd.DataFrame(scalars))
    # assemble metrics
    df_scalar = pd.concat(lst_df_scalar)
    df_scalar = df_scalar.sort_values("uid")
    df_scalar.to_csv(dir_path / "metrics_per_sample.csv", index=False)
    # average over samples in the dataset
    scalars = df_scalar.drop("uid", axis=1).mean().to_dict()
    scalars = {"test_" + k: v for k, v in scalars.items()}
    scalars["num_images_in_total"] = len(df_scalar)
    with open(dir_path / "mean_metrics.json", "w", encoding="utf-8") as f:
        json.dump(scalars, f, sort_keys=True, indent=4)
def main() -> None:  # pylint:disable=R0915
    """Ensemble multi-seed diffusion predictions and evaluate them.

    Loads the backed-up config from the wandb folder, votes across the
    per-seed test predictions, then computes metrics per ensembled folder.

    Raises:
        ValueError: if the task in the config is not diffusion.
    """
    args = parse_args()
    config = OmegaConf.load(args.log_dir / "files" / "config_backup.yaml")
    if config.task.name != "diffusion":
        raise ValueError("Ensemble is only for diffusion.")
    data_config = config.data
    dir_tfds = DIR_TFDS_PROCESSED_MAP[data_config.name]
    spacing = jnp.array(IMAGE_SPACING_MAP[data_config.name])
    num_classes = NUM_CLASSES_MAP[data_config["name"]]
    test_dir = args.log_dir / "files" / "test_evaluation"
    # no ensemble if 1 seed only
    lst_seed_dir = sorted(
        test_dir.glob("seed_*/"), key=lambda x: int(x.stem.split("_")[-1])
    )
    if len(lst_seed_dir) == 1:
        logging.info("Ensemble not performed as there is one seed only.")
        return
    # ensemble
    vote_ensemble(test_dir=test_dir, dir_tfds=dir_tfds, num_classes=num_classes)
    # evaluate every ensembled sampling-steps folder
    for dir_path in test_dir.glob("ensemble_*/sample_*_steps"):
        logging.info(f"Evaluating ensemble predictions metrics for {dir_path}.")
        evaluate_ensemble_prediction(
            dir_path=dir_path,
            dir_tfds=dir_tfds,
            num_classes=num_classes,
            spacing=spacing,
        )
if __name__ == "__main__":
    main()
| 7,072 | 31.296804 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/run_test.py | """Script to launch evaluation on test sets."""
import argparse
import json
from pathlib import Path
import jax
import numpy as np
from absl import logging
from omegaconf import OmegaConf
from imgx import TEST_SPLIT
from imgx.device import broadcast_to_local_devices
from imgx.exp import Experiment
from imgx.exp.train_state import get_eval_params_and_state_from_ckpt
logging.set_verbosity(logging.INFO)
def get_checkpoint_dir(
    log_dir: Path, num_batch: int, metric: str, max_metric: bool
) -> Path:
    """Get the checkpoint directory.

    Args:
        log_dir: Directory of entire log.
        num_batch: number of batches to select checkpoint.
            -1 means select the best one according to metric.
        metric: metric to maximise or minimise.
        max_metric: maximise the metric or not.

    Returns:
        A directory having arrays.npy and tree.pkl.

    Raises:
        ValueError: if any file not found, or metric missing.
    """
    ckpt_dir = log_dir / "files" / "ckpt"
    if num_batch < 0:
        # take the one having the best metrics
        best_metric_scalar = -np.inf if max_metric else np.inf
        # sort candidates ascending by batch number: Path.glob order is
        # filesystem-dependent, and iterating in batch order guarantees that
        # on ties the checkpoint trained for longer wins
        ckpt_i_dirs = sorted(
            (d for d in ckpt_dir.glob("batch_*/") if d.is_dir()),
            key=lambda d: int(d.stem.split("_")[-1]),
        )
        for ckpt_i_dir in ckpt_i_dirs:
            num_batch_i = int(ckpt_i_dir.stem.split("_")[-1])
            # load metric; skip checkpoints without evaluated metrics
            metric_path = ckpt_i_dir / "mean_metrics.json"
            if not metric_path.exists():
                continue
            with open(metric_path, encoding="utf-8") as f:
                scalars = json.load(f)
            if metric not in scalars:
                raise ValueError(f"Metrics {metric} not found in {ckpt_i_dir}")
            metric_scalar = scalars[metric]
            # use the ckpt if it's the first or its metric is better;
            # ties prefer the later batch thanks to ascending iteration
            if (
                (num_batch < 0)
                or (max_metric and (best_metric_scalar <= metric_scalar))
                or ((not max_metric) and (best_metric_scalar >= metric_scalar))
            ):
                best_metric_scalar = metric_scalar
                num_batch = num_batch_i
        if num_batch < 0:
            raise ValueError(f"Checkpoint not found under {ckpt_dir}")
    ckpt_dir = ckpt_dir / f"batch_{num_batch}"
    # sanity check: both checkpoint files must be present
    if not ckpt_dir.exists():
        raise ValueError(f"Checkpoint directory {ckpt_dir} does not exist.")
    array_path = ckpt_dir / "arrays.npy"
    if not array_path.exists():
        raise ValueError(f"Checkpoint {array_path} does not exist.")
    tree_path = ckpt_dir / "tree.pkl"
    if not tree_path.exists():
        raise ValueError(f"Checkpoint {tree_path} does not exist.")
    return ckpt_dir
def parse_args() -> argparse.Namespace:
    """Parse command line arguments for test-set evaluation.

    Returns:
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--log_dir", type=Path, default=None, help="Folder of wandb."
    )
    parser.add_argument(
        "--num_batch",
        type=int,
        default=-1,
        help="Number of batches for identify checkpoint.",
    )
    parser.add_argument(
        "--num_timesteps",
        type=int,
        default=-1,
        help="Number of sampling steps for diffusion.",
    )
    parser.add_argument(
        "--num_seeds",
        type=int,
        default=1,
        help="Number of seeds for inference.",
    )
    parser.add_argument(
        "--metric",
        type=str,
        default="mean_binary_dice_score",
        help="Metric to select template.",
    )
    # --max_metric / --min_metric flip the same boolean flag; default True
    parser.add_argument("--max_metric", dest="max_metric", action="store_true")
    parser.add_argument("--min_metric", dest="max_metric", action="store_false")
    parser.set_defaults(max_metric=True)
    return parser.parse_args()
def main() -> None:
    """Evaluate a trained model on the test split, once per seed.

    Loads the backed-up config and the best (or requested) checkpoint, then
    writes predictions under ``test_evaluation/seed_{i}``.

    Raises:
        ValueError: if the task is diffusion and num_timesteps is not set.
    """
    args = parse_args()
    config = OmegaConf.load(args.log_dir / "files" / "config_backup.yaml")
    out_dir = args.log_dir / "files" / "test_evaluation"
    if config.task.name == "diffusion":
        if args.num_timesteps <= 0:
            raise ValueError("num_timesteps required for diffusion.")
        config.task.diffusion.num_timesteps = args.num_timesteps
        logging.info(f"Sampling {args.num_timesteps} steps.")
    ckpt_dir = get_checkpoint_dir(
        log_dir=args.log_dir,
        num_batch=args.num_batch,
        metric=args.metric,
        max_metric=args.max_metric,
    )
    logging.info(f"Using checkpoint {ckpt_dir}.")
    # load checkpoint (optionally EMA weights)
    params, state = get_eval_params_and_state_from_ckpt(
        ckpt_dir=ckpt_dir,
        use_ema=config.training.ema.use,
    )
    # prevent any gradient related actions
    params = jax.lax.stop_gradient(params)
    state = jax.lax.stop_gradient(state)
    # inference per seed
    for seed in range(args.num_seeds):
        logging.info(f"Starting test split evaluation for seed {seed}.")
        out_dir_seed = out_dir / f"seed_{seed}"
        out_dir_seed.mkdir(parents=True, exist_ok=True)
        if config.task.name == "diffusion":
            out_dir_seed = out_dir_seed / f"sample_{args.num_timesteps}_steps"
        # init exp per seed
        # necessary as data set will be exhausted
        run = Experiment(config=config)
        run.eval_init()
        rng = jax.random.PRNGKey(seed)
        rng = broadcast_to_local_devices(rng)
        run.eval_step(
            split=TEST_SPLIT,
            params=params,
            state=state,
            rng=rng,
            out_dir=out_dir_seed,
            save_predictions=True,
        )
        logging.info(f"Finished test split evaluation for seed {seed}.")
if __name__ == "__main__":
    main()
| 5,684 | 30.236264 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/math_util.py | """Module for math functions."""
import jax
import jax.numpy as jnp
def logits_to_mask(x: jnp.ndarray, axis: int) -> jnp.ndarray:
    """Convert logits into a one-hot mask.

    The one is placed at the class having the largest logit.

    Args:
        x: logits.
        axis: axis of num_classes.

    Returns:
        One hot probabilities.
    """
    winner = jnp.argmax(x, axis=axis)
    return jax.nn.one_hot(x=winner, num_classes=x.shape[axis], axis=axis)
| 471 | 18.666667 | 61 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/run_valid.py | """Script to launch evaluation on validation tests."""
import argparse
from pathlib import Path
from typing import List
import jax
from absl import logging
from omegaconf import OmegaConf
from imgx import VALID_SPLIT
from imgx.device import broadcast_to_local_devices
from imgx.exp import Experiment
from imgx.exp.train_state import get_eval_params_and_state_from_ckpt
logging.set_verbosity(logging.INFO)
def get_checkpoint_dirs(
    log_dir: Path,
) -> List[Path]:
    """Get the directories of all complete checkpoints.

    Args:
        log_dir: Directory of entire log.

    Returns:
        A list of directories having arrays.npy and tree.pkl,
        sorted by batch number in descending order (latest first).
    """
    ckpt_root = log_dir / "files" / "ckpt"
    valid_dirs = []
    for candidate in ckpt_root.glob("batch_*/"):
        # keep only directories that contain both checkpoint files
        if not candidate.is_dir():
            continue
        if not (candidate / "arrays.npy").exists():
            continue
        if not (candidate / "tree.pkl").exists():
            continue
        valid_dirs.append(candidate)
    return sorted(
        valid_dirs, key=lambda d: int(d.stem.split("_")[-1]), reverse=True
    )
def parse_args() -> argparse.Namespace:
    """Parse command line arguments for validation-set evaluation.

    Returns:
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--log_dir", type=Path, default=None, help="Folder of wandb."
    )
    parser.add_argument(
        "--num_timesteps",
        type=int,
        default=-1,
        help="Number of sampling steps for diffusion.",
    )
    return parser.parse_args()
def main() -> None:
    """Evaluate every saved checkpoint on the validation split.

    Metrics for each checkpoint are written into that checkpoint's folder.

    Raises:
        ValueError: if the task is diffusion and num_timesteps is not set.
    """
    args = parse_args()
    config = OmegaConf.load(args.log_dir / "files" / "config_backup.yaml")
    if config.task.name == "diffusion":
        if args.num_timesteps <= 0:
            raise ValueError("num_timesteps required for diffusion.")
        config.task.diffusion.num_timesteps = args.num_timesteps
    ckpt_dirs = get_checkpoint_dirs(
        log_dir=args.log_dir,
    )
    # init exp once; checkpoints share the same data pipeline
    run = Experiment(config=config)
    run.eval_init()
    for ckpt_dir in ckpt_dirs:
        # load checkpoint (optionally EMA weights)
        params, state = get_eval_params_and_state_from_ckpt(
            ckpt_dir=ckpt_dir,
            use_ema=config.training.ema.use,
        )
        # prevent any gradient related actions
        params = jax.lax.stop_gradient(params)
        state = jax.lax.stop_gradient(state)
        # inference
        logging.info(f"Starting valid split evaluation for {ckpt_dir}.")
        rng = jax.random.PRNGKey(config.seed)
        rng = broadcast_to_local_devices(rng)
        run.eval_step(
            split=VALID_SPLIT,
            params=params,
            state=state,
            rng=rng,
            out_dir=ckpt_dir,
            save_predictions=False,
        )
        # clean up to release memory before the next checkpoint
        del params
        del state
if __name__ == "__main__":
    main()
| 3,106 | 24.891667 | 74 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/config.py | """Module for configuration related functions."""
from typing import Dict
def flatten_dict(d: Dict, parent_key: str = "", sep: str = "_") -> Dict:
    """Flatten a nested dict by joining keys with a separator.

    Args:
        d: dict to flatten.
        parent_key: key prefix inherited from enclosing dicts.
        sep: separation string.

    Returns:
        Flattened dict.
    """
    flat: Dict = {}
    for key, value in d.items():
        full_key = parent_key + sep + key if parent_key else key
        if isinstance(value, Dict):
            # recurse into nested dicts, carrying the joined prefix
            flat.update(flatten_dict(d=value, parent_key=full_key, sep=sep))
        else:
            flat[full_key] = value
    return flat
| 603 | 23.16 | 72 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/__init__.py | """A Jax-based DL toolkit for biomedical and bioinformatics applications."""
from pathlib import Path
# machine error
EPS = 1.0e-5
NAN_MASK = "nan_mask"
# path for all non-tensorflow-dataset data sets
DIR_DATA = Path("datasets")
# splits
TRAIN_SPLIT = "train"
VALID_SPLIT = "valid"
TEST_SPLIT = "test"
# jax device
# one model can be stored across multiple shards/slices
# given 8 devices, it can be grouped into 4x2
# if num_devices_per_replica = 2, then one model is stored across 2 devices
# so the replica_axis would be of size 4
SHARD_AXIS = "shard_axis"
REPLICA_AXIS = "replica_axis"
# data dict keys
UID = "uid"
IMAGE = "image"
LABEL = "label"
| 657 | 21.689655 | 76 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/run_train.py | """Script to launch training."""
from pathlib import Path
import hydra
import jax
import wandb
from absl import logging
from omegaconf import DictConfig, OmegaConf
from imgx import VALID_SPLIT
from imgx.config import flatten_dict
from imgx.exp import Experiment
from imgx.exp.train_state import get_eval_params_and_state, save_ckpt
logging.set_verbosity(logging.INFO)
def set_debug_config(config: DictConfig) -> DictConfig:
    """Modify config in place for fast debugging runs.

    Shrinks all model channel counts, reduces dataset/batch sizes, and makes
    evaluation/saving more frequent.

    Args:
        config: original config.

    Returns:
        modified config (same object, mutated).
    """
    # reduce all model size
    config.model.unet3d.num_channels = (1, 2, 4, 8)
    config.model.unet3d_slice.num_channels = (1, 2, 4, 8)
    config.model.unet3d_time.num_channels = (1, 2, 4, 8)
    config.model.unet3d_slice_time.num_channels = (1, 2, 4, 8)
    # make training shorter
    n_devices = jax.local_device_count()
    config.data.max_num_samples = 11
    config.training.batch_size_per_replica = 2
    # total batch size spans all local devices
    config.training.batch_size = (
        n_devices * config.training.batch_size_per_replica
    )
    config.training.max_num_samples = 100
    # make logging more frequent
    config.logging.eval_freq = 1
    config.logging.save_freq = 4
    return config
def get_batch_size_per_step(config: DictConfig) -> int:
    """Return the actual number of samples per step.

    Args:
        config: total config.

    Returns:
        Number of samples across all devices, or 1 when the batch size
        is not configured.
    """
    training_config = config["training"]
    if "batch_size_per_replica" not in training_config:
        logging.warning("Batch size per step is not accurate.")
        return 1
    # number of model replicas across all local devices
    num_replicas = (
        jax.local_device_count() // training_config["num_devices_per_replica"]
    )
    return training_config["batch_size_per_replica"] * num_replicas
@hydra.main(
    version_base=None, config_path="conf", config_name="config_segmentation"
)
def main(  # pylint:disable=too-many-statements
    config: DictConfig,
) -> None:
    """Train a model, periodically evaluating and checkpointing.

    Args:
        config: config loaded from yaml by hydra.

    Raises:
        ValueError: if distributed training is requested
            (num_devices_per_replica != 1).
    """
    # update config for quick debug runs
    if config.debug:
        config = set_debug_config(config)
    # init wandb; files_dir stays None when wandb logging is disabled,
    # which also disables checkpoint saving below
    files_dir = None
    if config.logging.wandb.project:
        wandb_run = wandb.init(
            project=config.logging.wandb.project,
            entity=config.logging.wandb.entity,
            config=flatten_dict(dict(config)),
        )
        files_dir = Path(wandb_run.settings.files_dir)
        # backup config
        OmegaConf.save(config=config, f=files_dir / "config_backup.yaml")
    # init devices
    devices = jax.local_devices()
    if config.training.num_devices_per_replica != 1:
        raise ValueError("Distributed training not supported.")
    logging.info(f"Local devices are: {devices}")
    # init exp
    run = Experiment(config=config)
    train_state = run.train_init()
    run.eval_init()
    logging.info("Start training.")
    batch_size_per_step = get_batch_size_per_step(config)
    max_num_steps = config.training.max_num_samples // batch_size_per_step
    for i in range(1, 1 + max_num_steps):
        # train step
        train_state, train_scalars = run.train_step(
            train_state=train_state,
        )
        train_scalars = {"train_" + k: v for k, v in train_scalars.items()}
        scalars = {
            "num_samples": i * batch_size_per_step,
            **train_scalars,
        }
        # checkpoint only at save_freq intervals and when wandb is active
        to_save_ckpt = (
            (i > 0)
            and (i % config.logging.save_freq == 0)
            and (files_dir is not None)
        )
        # evaluate if saving ckpt or time to evaluate
        to_eval = to_save_ckpt or (i % config.logging.eval_freq == 0)
        if to_save_ckpt and (files_dir is not None):
            ckpt_dir = files_dir / "ckpt" / f"batch_{i}"
        else:
            ckpt_dir = None
        if to_eval and config.eval:
            # TODO on TPU evaluation causes OOM
            params, state = get_eval_params_and_state(train_state)
            val_scalars = run.eval_step(
                split=VALID_SPLIT,
                params=params,
                state=state,
                rng=jax.random.PRNGKey(config.seed),
                out_dir=ckpt_dir,
                save_predictions=False,
            )
            val_scalars = {"valid_" + k: v for k, v in val_scalars.items()}
            scalars = {
                **scalars,
                **val_scalars,
            }
        if config.logging.wandb.project:
            wandb.log(scalars)
        # format floats compactly for the log line
        scalars = {
            k: v if isinstance(v, int) else f"{v:.2e}"
            for k, v in scalars.items()
        }
        logging.info(f"Batch {i}: {scalars}")
        # save checkpoint and metrics
        if ckpt_dir is not None:
            save_ckpt(
                train_state=train_state,
                ckpt_dir=ckpt_dir,
            )
            # backup config every time
            OmegaConf.save(config=config, f=ckpt_dir / "config.yaml")
            logging.info(f"Checkpoint saved at {ckpt_dir}")
if __name__ == "__main__":
    main()  # pylint: disable=no-value-for-parameter
| 5,235 | 29.619883 | 76 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/device.py | """Module to handle multi-devices."""
from typing import Optional, Tuple, Union
import chex
import jax
import jax.numpy as jnp
def broadcast_to_local_devices(value: chex.ArrayTree) -> chex.ArrayTree:
    """Broadcasts an object to all local devices.

    Args:
        value: value to be broadcast.

    Returns:
        broadcast value.
    """
    devices = jax.local_devices()

    def _replicate(leaf: chex.ArrayTree) -> chex.ArrayTree:
        # place one copy of the leaf on every local device
        return jax.device_put_sharded([leaf] * len(devices), devices)

    return jax.tree_map(_replicate, value)
def get_first_replica_values(value: chex.ArrayTree) -> chex.ArrayTree:
    """Gets values from the first replica.

    Args:
        value: broadcast value.

    Returns:
        value of the first replica.
    """

    def _first(leaf):
        return leaf[0]

    return jax.tree_map(_first, value)
def bind_rng_to_host_or_device(
    rng: jnp.ndarray,
    bind_to: Optional[str] = None,
    axis_name: Optional[Union[str, Tuple[str, ...]]] = None,
) -> jnp.ndarray:
    """Binds a rng to the host or device.

    https://github.com/google-research/scenic/blob/main/scenic/train_lib/train_utils.py#L577

    Must be called from within a pmapped function. Note that when binding to
    "device", we also bind the rng to hosts, as we fold_in the rng with
    axis_index, which is unique for devices across all hosts.

    Args:
        rng: A jax.random.PRNGKey.
        bind_to: Must be one of the 'host' or 'device'. None means no binding.
        axis_name: The axis of the devices we are binding rng across, necessary
            if bind_to is device.

    Returns:
        jax.random.PRNGKey specialized to host/device.

    Raises:
        ValueError: if bind_to is not None, 'host' or 'device'.
    """
    if bind_to is None:
        return rng
    if bind_to == "host":
        # process_index is unique per host
        return jax.random.fold_in(rng, jax.process_index())
    if bind_to == "device":
        # axis_index is unique per device across all hosts
        return jax.random.fold_in(rng, jax.lax.axis_index(axis_name))
    raise ValueError(
        "`bind_to` should be one of the `[None, 'host', 'device']`"
    )
def shard(
    pytree: chex.ArrayTree,
    num_replicas: int,
) -> chex.ArrayTree:
    """Reshapes all arrays in the pytree to add a leading shard dimension.

    We assume that all arrays in the pytree have leading dimension
    divisible by num_devices_per_replica.

    Args:
        pytree: A pytree of arrays to be sharded.
        num_replicas: number of model replicas.

    Returns:
        Sharded data.
    """

    def _split_leading_axis(arr: jnp.ndarray) -> jnp.ndarray:
        # (batch, ...) -> (num_replicas, batch // num_replicas, ...)
        return arr.reshape((num_replicas, -1) + arr.shape[1:])

    return jax.tree_map(_split_leading_axis, pytree)
def unshard(pytree: chex.ArrayTree) -> chex.ArrayTree:
    """Reshapes arrays from [ndev, bs, ...] to [host_bs, ...].

    Args:
        pytree: A pytree of arrays to be unsharded.

    Returns:
        Unsharded data.
    """

    def _merge_leading_axes(arr: jnp.ndarray) -> jnp.ndarray:
        # fold the device axis back into the batch axis
        num_devices, per_device_bs = arr.shape[:2]
        return arr.reshape((num_devices * per_device_bs,) + arr.shape[2:])

    return jax.tree_map(_merge_leading_axes, pytree)
def is_tpu() -> bool:
    """Return true if the device is tpu.

    Returns:
        True if tpu.
    """
    first_device = jax.local_devices()[0]
    return first_device.platform == "tpu"
| 3,095 | 25.689655 | 92 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/metric/area.py | """Metrics to measure foreground area."""
import jax.numpy as jnp
def class_proportion(mask: jnp.ndarray) -> jnp.ndarray:
    """Calculate proportion per class.

    Args:
        mask: shape = (batch, d1, ..., dn, num_classes).

    Returns:
        Proportion, shape = (batch, num_classes).
    """
    spatial_axes = tuple(range(1, mask.ndim - 1))
    num_voxels = jnp.float32(jnp.prod(jnp.array(mask.shape[1:-1])))
    # dividing by sqrt(volume) twice reduces over/underflow risk
    sqrt_num_voxels = jnp.sqrt(num_voxels)
    scaled = jnp.float32(mask) / sqrt_num_voxels
    return jnp.sum(scaled, axis=spatial_axes) / sqrt_num_voxels
| 590 | 27.142857 | 70 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/metric/distribution.py | """Metric functions for probability distributions."""
import jax.numpy as jnp
def normal_kl(
    p_mean: jnp.ndarray,
    p_log_variance: jnp.ndarray,
    q_mean: jnp.ndarray,
    q_log_variance: jnp.ndarray,
) -> jnp.ndarray:
    """Compute the KL divergence KL(p || q) between 1D normal distributions.

    Although the inputs are arrays, each value is considered independently.
    This function is not symmetric.
    Input array shapes should be broadcastable.

    Args:
        p_mean: mean of distribution p.
        p_log_variance: log variance of distribution p.
        q_mean: mean of distribution q.
        q_log_variance: log variance of distribution q.

    Returns:
        KL divergence.
    """
    log_var_ratio = p_log_variance - q_log_variance
    scaled_mean_diff = ((p_mean - q_mean) ** 2) * jnp.exp(-q_log_variance)
    return 0.5 * (
        jnp.exp(log_var_ratio) - log_var_ratio - 1.0 + scaled_mean_diff
    )
def approx_standard_normal_cdf(x: jnp.ndarray) -> jnp.ndarray:
    """Approximate cumulative distribution function of standard normal.

    if x ~ Normal(mean, var), then cdf(z) = p(x <= z)
    https://www.aimspress.com/article/doi/10.3934/math.2022648#b13
    https://www.jstor.org/stable/2346872

    Args:
        x: array of any shape with any float values.

    Returns:
        CDF estimation.
    """
    # tanh-based approximation (same cubic form as the GELU tanh approx)
    inner = jnp.sqrt(2.0 / jnp.pi) * (x + 0.044715 * x**3)
    return 0.5 * (1.0 + jnp.tanh(inner))
def discretized_gaussian_log_likelihood(
    x: jnp.ndarray,
    mean: jnp.ndarray,
    log_variance: jnp.ndarray,
    x_delta: float = 1.0 / 255.0,
    x_bound: float = 0.999,
) -> jnp.ndarray:
    """Log-likelihood of a normal distribution discretizing to an image.

    Args:
        x: target image, with value inside normalized in [-1, 1].
        mean: normal distribution mean.
        log_variance: log of distribution variance.
        x_delta: discretization step, used to estimate probability.
        x_bound: values with abs > x_bound are calculated differently.

    Returns:
        Discretized log likelihood over 2*delta.
    """

    def _approx_cdf(v: jnp.ndarray) -> jnp.ndarray:
        # tanh-based approximation of the standard normal CDF
        return 0.5 * (
            1.0 + jnp.tanh(jnp.sqrt(2.0 / jnp.pi) * (v + 0.044715 * v**3))
        )

    inv_std = jnp.exp(-0.5 * log_variance)
    centered = x - mean
    # CDF evaluated at the upper/lower edges of the discretization bin
    cdf_upper = _approx_cdf(inv_std * (centered + x_delta))
    cdf_lower = _approx_cdf(inv_std * (centered - x_delta))
    # log p(y <= z+delta), used on the left tail (x < -x_bound)
    log_cdf_upper = jnp.log(cdf_upper.clip(min=1e-12))
    # log p(y > z-delta), used on the right tail (x > x_bound)
    log_sf_lower = jnp.log((1.0 - cdf_lower).clip(min=1e-12))
    # log p(z-delta < y <= z+delta), used in the interior
    log_bin_prob = jnp.log((cdf_upper - cdf_lower).clip(min=1e-12))
    return jnp.where(
        x < -x_bound,
        log_cdf_upper,
        jnp.where(x > x_bound, log_sf_lower, log_bin_prob),
    )
| 3,097 | 29.07767 | 75 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/metric/dice.py | """Metric functions for image segmentation."""
import jax.numpy as jnp
def dice_score(
    mask_pred: jnp.ndarray,
    mask_true: jnp.ndarray,
) -> jnp.ndarray:
    """Soft Dice score, larger is better.

    Args:
        mask_pred: soft mask with probabilities, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).

    Returns:
        Dice score of shape (batch, num_classes),
        NaN when both masks have no foreground.
    """
    spatial_axes = tuple(range(1, mask_pred.ndim - 1))
    intersection = jnp.sum(mask_pred * mask_true, axis=spatial_axes)
    total = jnp.sum(mask_pred + mask_true, axis=spatial_axes)
    return jnp.where(total > 0, 2.0 * intersection / total, jnp.nan)
def iou(
    mask_pred: jnp.ndarray,
    mask_true: jnp.ndarray,
) -> jnp.ndarray:
    """IOU (Intersection Over Union), or Jaccard index.

    Args:
        mask_pred: binary mask of predictions, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).

    Returns:
        IoU of shape (batch, num_classes),
        NaN when both masks have no foreground.
    """
    spatial_axes = tuple(range(1, mask_pred.ndim - 1))
    intersection = jnp.sum(mask_pred * mask_true, axis=spatial_axes)
    total = jnp.sum(mask_pred + mask_true, axis=spatial_axes)
    union = total - intersection
    return jnp.where(total > 0, intersection / union, jnp.nan)
| 1,407 | 28.333333 | 75 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/metric/surface_distance.py | """Surface distance metric.
Functions are all numpy based, as they rely on scipy and not jittable for JAX.
References:
https://github.com/deepmind/surface-distance
https://github.com/Project-MONAI/MONAI/blob/dev/monai/metrics/surface_distance.py
"""
from functools import partial
from itertools import combinations
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from scipy.ndimage import binary_erosion, distance_transform_edt
def get_invalid_bounding_box(
    mask: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Return all -1 values as an invalid bounding box.

    Args:
        mask: boolean mask, with n spatial axes.

    Returns:
        - bbox_min, [-1] * n.
        - bbox_max, [-1] * n.
    """
    sentinel = -np.ones(mask.ndim, np.int32)
    # return two independent arrays so callers may mutate one safely
    return sentinel, sentinel.copy()
def get_valid_binary_mask_bounding_box(
    mask: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Get the bounding box of foreground with start-end positions.

    Assumes the mask contains at least one foreground value.

    Args:
        mask: boolean mask, with only spatial axes.

    Returns:
        - bbox_min, [start_in_1st_spatial_dim, ...], inclusive, starts at zero.
        - bbox_max, [end_in_1st_spatial_dim, ...], exclusive, starts at zero.
    """
    n_axes = mask.ndim
    starts = []
    ends = []
    for axis in range(n_axes):
        # project the mask onto this axis by max-reducing all other axes
        other_axes = tuple(a for a in range(n_axes) if a != axis)
        profile = np.amax(mask, axis=other_axes)
        # first True from the front / back of the 1D profile
        starts.append(np.argmax(profile))
        ends.append(profile.shape[0] - np.argmax(np.flip(profile)))
    return np.stack(starts), np.stack(ends)
def get_binary_mask_bounding_box(
    mask: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Get the bounding box of foreground with start-end positions.

    If there is no foreground, return -1 for all outputs.

    Args:
        mask: boolean mask, with only spatial axes.

    Returns:
        - bbox_min, [start_in_1st_spatial_dim, ...], inclusive, starts at zero.
        - bbox_max, [end_in_1st_spatial_dim, ...], exclusive, starts at zero.
    """
    # normalize non-boolean masks to booleans first
    binary = mask if mask.dtype == np.bool_ else mask > 0
    if not np.any(binary):
        # empty mask: no valid bounding box exists
        return get_invalid_bounding_box(binary)
    return get_valid_binary_mask_bounding_box(binary)
def get_mask_edges(
    mask_pred: np.ndarray, mask_true: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """Do binary erosion and use XOR for input to get the edges.

    Both masks are cropped to the bounding box of their union first, so the
    erosion only runs on the relevant sub-volume.

    Args:
        mask_pred: the predicted binary mask.
        mask_true: the ground truth binary mask.

    Returns:
        edge_pred: the predicted binary edge.
        edge_true: the ground truth binary edge.
    """
    pred = mask_pred if mask_pred.dtype == np.bool_ else mask_pred > 0
    true = mask_true if mask_true.dtype == np.bool_ else mask_true > 0
    union = pred | true
    if not union.any():
        # no foreground in either mask: no edges
        return np.zeros_like(union), np.zeros_like(union)
    bbox_min, bbox_max = get_binary_mask_bounding_box(mask=union)
    # crop both masks to the union's bounding box, axis by axis
    for axis, (lo, hi) in enumerate(zip(bbox_min, bbox_max)):
        pred = pred.take(indices=range(lo, hi), axis=axis)
        true = true.take(indices=range(lo, hi), axis=axis)
    # the edge is the set of foreground voxels removed by one erosion
    return binary_erosion(pred) ^ pred, binary_erosion(true) ^ true
def get_surface_distance(
    edge_pred: np.ndarray,
    edge_true: np.ndarray,
    spacing: Optional[Tuple[float, ...]] = None,
) -> np.ndarray:
    """Calculate surface distance from predicted edges to ground truth.

    Args:
        edge_pred: the predicted binary edge.
        edge_true: the ground truth binary edge.
        spacing: spacing of pixel/voxels along each dimension.

    Returns:
        surface distance, 1D array of len = edge size.
    """
    # distance of every voxel to the nearest ground-truth edge voxel
    dist_to_true = distance_transform_edt(input=~edge_true, sampling=spacing)
    # keep the distances only at predicted edge locations
    return np.asarray(dist_to_true[edge_pred])
def _aggregated_symmetric_surface_distance(
    dist_pred_true: np.ndarray,
    dist_true_pred: np.ndarray,
    f: Callable,
    num_args: int,
) -> float:
    """Aggregate surface distance in a symmetric way.

    Args:
        dist_pred_true: surface distance from predicted edges to ground truth.
        dist_true_pred: surface distance from ground truth edges to predicted.
        f: an aggregation function taking one or two arguments.
        num_args: number of arguments for f. It has to be passed manually,
            because it is not possible to get the number for partial functions.

    Returns:
        Aggregated value.

    Raises:
        ValueError: if num_args is not 1 or 2.
    """
    if num_args not in (1, 2):
        raise ValueError(
            "Symmetric surface distance aggregation function "
            f"should take one or two arguments, got {num_args}."
        )
    if num_args == 2:
        # the function itself combines both directions
        return f(dist_pred_true, dist_true_pred)
    # apply per direction, then take the worse (larger) one
    return max(f(dist_pred_true), f(dist_true_pred))
def _aggregated_surface_distance(
    mask_pred: np.ndarray,
    mask_true: np.ndarray,
    agg_fn_list: List[Callable],
    num_args_list: List[int],
    spacing: Optional[Tuple[float, ...]],
    symmetric: bool = True,
) -> np.ndarray:
    """Calculate aggregated surface distance for one sample and one class.

    Args:
        mask_pred: one hot predictions with only spatial dimensions.
        mask_true: one hot targets with only spatial dimensions.
        agg_fn_list: a list of functions to aggregate a list of distances.
        num_args_list: a list of ints, corresponding to number of arguments
            for agg_fn_list.
        spacing: spacing of pixel/voxels along each dimension.
        symmetric: the distance is symmetric to (pred, true) means swapping
            the masks provides the same value.

    Returns:
        1D array with one aggregated value per aggregation function;
        NaNs when either mask has no foreground or edges are empty.
    """
    nan_result = np.array([np.nan] * len(agg_fn_list))
    if not (mask_pred.any() and mask_true.any()):
        # prediction or ground truth has no foreground at all
        return nan_result
    edge_pred, edge_true = get_mask_edges(
        mask_pred=mask_pred,
        mask_true=mask_true,
    )
    dist_pred_true = get_surface_distance(
        edge_pred=edge_pred, edge_true=edge_true, spacing=spacing
    )
    if not symmetric:
        # one-directional: only pred -> true distances are needed
        if dist_pred_true.size == 0:
            return nan_result
        return np.array([f(dist_pred_true) for f in agg_fn_list])
    # symmetric: also measure from ground-truth edges to predicted edges
    dist_true_pred = get_surface_distance(
        edge_pred=edge_true, edge_true=edge_pred, spacing=spacing
    )
    if dist_pred_true.size == 0 or dist_true_pred.size == 0:
        return nan_result
    return np.array(
        [
            _aggregated_symmetric_surface_distance(
                dist_pred_true=dist_pred_true,
                dist_true_pred=dist_true_pred,
                f=f,
                num_args=num_args,
            )
            for f, num_args in zip(agg_fn_list, num_args_list)
        ]
    )
def aggregated_surface_distance(
    mask_pred: np.ndarray,
    mask_true: np.ndarray,
    agg_fns: Union[Callable, List[Callable]],
    num_args: Union[int, List[int]],
    spacing: Optional[Tuple[float, ...]],
    symmetric: bool = True,
) -> np.ndarray:
    """Calculate aggregated surface distance on batch.

    Args:
        mask_pred: one hot predictions, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).
        agg_fns: a function or a list of functions
            to aggregate a list of distances.
        num_args: an int or a list of ints, corresponding to number of
            arguments for agg_fns.
        spacing: spacing of pixel/voxels along each dimension.
        symmetric: the distance is symmetric to (pred, true) means swapping
            the masks provides the same value.

    Returns:
        Aggregated surface distance of shape (num_funcs, batch, num_classes),
        or (batch, num_classes) if a single function is given.

    Raises:
        ValueError: if agg_fns and num_args lengths do not match.
    """
    fn_list = agg_fns if isinstance(agg_fns, list) else [agg_fns]
    nargs_list = num_args if isinstance(num_args, list) else [num_args]
    if len(fn_list) != len(nargs_list):
        raise ValueError("agg_funcs and num_args lengths do not match.")
    batch_size = mask_pred.shape[0]
    num_classes = mask_pred.shape[-1]
    result = np.zeros((len(fn_list), batch_size, num_classes))
    # surface distances are not vectorizable: loop over (sample, class)
    for sample_idx in range(batch_size):
        for class_idx in range(num_classes):
            result[:, sample_idx, class_idx] = _aggregated_surface_distance(
                mask_pred=mask_pred[sample_idx, ..., class_idx],
                mask_true=mask_true[sample_idx, ..., class_idx],
                agg_fn_list=fn_list,
                num_args_list=nargs_list,
                spacing=spacing,
                symmetric=symmetric,
            )
    if len(fn_list) == 1:
        # drop the function axis for the common single-function case
        return result[0, ...]
    return result
def average_surface_distance(
    mask_pred: np.ndarray,
    mask_true: np.ndarray,
    spacing: Optional[Tuple[float, ...]],
    symmetric: bool = True,
) -> np.ndarray:
    """Calculate average surface distance on batch.

    Thin wrapper aggregating surface distances with the mean.

    Args:
        mask_pred: one hot predictions, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).
        spacing: spacing of pixel/voxels along each dimension.
        symmetric: the distance is symmetric to (pred, true) means swapping
            the masks provides the same value.

    Returns:
        Average surface distance of shape (batch, num_classes).
    """
    return aggregated_surface_distance(
        mask_pred,
        mask_true,
        agg_fns=np.mean,
        num_args=1,
        spacing=spacing,
        symmetric=symmetric,
    )
def hausdorff_distance(
    mask_pred: np.ndarray,
    mask_true: np.ndarray,
    percentile: int,
    spacing: Optional[Tuple[float, ...]],
    symmetric: bool = True,
) -> np.ndarray:
    """Calculate hausdorff distance on batch.

    The (percentile) Hausdorff distance is the given percentile of the
    surface distances, aggregated via `np.percentile`.

    Args:
        mask_pred: one hot predictions, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).
        percentile: hausdorff distance is the percentile of surface distances.
        spacing: spacing of pixel/voxels along each dimension.
        symmetric: the distance is symmetric to (pred, true) means swapping
            the masks provides the same value.

    Returns:
        Hausdorff distance of shape (batch, num_classes).
    """
    percentile_fn = partial(np.percentile, q=percentile)
    return aggregated_surface_distance(
        mask_pred,
        mask_true,
        agg_fns=percentile_fn,
        num_args=1,
        spacing=spacing,
        symmetric=symmetric,
    )
def normalized_surface_dice_from_distances(
    dist_pred_true: np.ndarray,
    dist_true_pred: np.ndarray,
    tolerance_mm: float = 1.0,
) -> float:
    """Calculate the normalized surface dice at a specified tolerance.

    The implementation is similar to MONAI
    https://github.com/Project-MONAI/MONAI/blob/dev/monai/metrics/surface_dice.py,
    and different from DeepMind,
    https://github.com/deepmind/surface-distance/blob/master/surface_distance/metrics.py,
    where deepmind used Marching Cubes,
    https://graphics.stanford.edu/~mdfisher/MarchingCubes.html
    to estimate surface area corresponding to each voxel,
    and is therefore more accurate but slower.
    MONAI's implementation uses surface distances only,
    and per voxel on the boundary/edge has equal weights, unlike DeepMind.

    Args:
        dist_pred_true: surface distance from predicted edges to ground truth.
        dist_true_pred: surface distance from ground truth edges to predicted.
        tolerance_mm: tolerance value to consider surface being overlapping.

    Returns:
        A float value between [0.0, 1.0].
    """
    num_boundary_voxels = len(dist_pred_true) + len(dist_true_pred)
    if num_boundary_voxels == 0:
        # the class is neither present in the prediction
        # nor in the reference segmentation
        return np.nan
    # count boundary voxels (in both directions) within the tolerance
    num_within_tolerance = np.sum(dist_pred_true <= tolerance_mm) + np.sum(
        dist_true_pred <= tolerance_mm
    )
    return num_within_tolerance / num_boundary_voxels
def normalized_surface_dice(
    mask_pred: np.ndarray,
    mask_true: np.ndarray,
    spacing: Optional[Tuple[float, ...]],
    tolerance_mm: float = 1.0,
) -> np.ndarray:
    """Calculate the normalized surface dice at a specified tolerance on batch.

    Args:
        mask_pred: one hot predictions, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).
        spacing: spacing of pixel/voxels along each dimension.
        tolerance_mm: tolerance value to consider surface being overlapping.

    Returns:
        Normalized surface dice of shape (batch, num_classes),
        values in [0.0, 1.0], NaN when a class has no boundary in
        either mask.
    """
    # the dice needs both directed distance sets, hence num_args=2
    # and symmetric=True.
    return aggregated_surface_distance(
        mask_pred=mask_pred,
        mask_true=mask_true,
        agg_fns=partial(
            normalized_surface_dice_from_distances, tolerance_mm=tolerance_mm
        ),
        num_args=2,
        spacing=spacing,
        symmetric=True,
    )
| 13,562 | 33.336709 | 89 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/metric/__init__.py | """Module for metrics."""
from imgx.metric.area import class_proportion
from imgx.metric.centroid import centroid_distance
from imgx.metric.dice import dice_score, iou
from imgx.metric.surface_distance import (
aggregated_surface_distance,
average_surface_distance,
hausdorff_distance,
normalized_surface_dice,
normalized_surface_dice_from_distances,
)
# Public API of the metric package, controlling `from imgx.metric import *`.
__all__ = [
    "dice_score",
    "iou",
    "average_surface_distance",
    "aggregated_surface_distance",
    "normalized_surface_dice",
    "normalized_surface_dice_from_distances",
    "hausdorff_distance",
    "centroid_distance",
    "class_proportion",
]
| 636 | 25.541667 | 50 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/metric/centroid.py | """Metric centroid distance."""
from typing import Optional, Tuple
import jax.numpy as jnp
def get_coordinate_grid(shape: Tuple[int, ...]) -> jnp.ndarray:
    """Generate a grid with given shape.

    This function is not jittable as the output depends on the value of shapes.

    Args:
        shape: shape of the grid, (d1, ..., dn).

    Returns:
        grid: grid coordinates, of shape (n, d1, ..., dn).
            grid[:, i1, ..., in] = [i1, ..., in]
    """
    axes = [jnp.arange(dim) for dim in shape]
    # "ij" indexing keeps axis order identical to `shape`
    mesh = jnp.meshgrid(*axes, indexing="ij")
    return jnp.stack(mesh, axis=0, dtype=jnp.float32)
def get_centroid(
    mask: jnp.ndarray,
    grid: jnp.ndarray,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Calculate the centroid of the mask.

    Args:
        mask: boolean mask of shape = (batch, d1, ..., dn, num_classes)
        grid: shape = (n, d1, ..., dn)

    Returns:
        centroid of shape = (batch, n, num_classes).
        nan mask of shape = (batch, num_classes), True where class is empty.
    """
    spatial_axes_mask = tuple(range(1, mask.ndim - 1))
    spatial_axes_grid = tuple(range(2, mask.ndim))
    # weight each coordinate by the mask:
    # (batch, n, d1, ..., dn, num_classes)
    weighted = jnp.expand_dims(grid, axis=(0, -1)) * jnp.expand_dims(
        mask, axis=1
    )
    # per-axis coordinate sums, (batch, n, num_classes)
    coord_sum = jnp.sum(weighted, axis=spatial_axes_grid)
    # number of foreground voxels, (batch, num_classes)
    voxel_count = jnp.sum(mask, axis=spatial_axes_mask)
    denominator = voxel_count[:, None, :]
    # empty classes have no centroid -> NaN
    centroid = jnp.where(denominator > 0, coord_sum / denominator, jnp.nan)
    return centroid, voxel_count == 0
def centroid_distance(
    mask_true: jnp.ndarray,
    mask_pred: jnp.ndarray,
    grid: jnp.ndarray,
    spacing: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
    """Calculate the L2-distance between two centroids.

    Args:
        mask_true: shape = (batch, d1, ..., dn, num_classes).
        mask_pred: shape = (batch, d1, ..., dn, num_classes).
        grid: shape = (n, d1, ..., dn).
        spacing: spacing of pixel/voxels along each dimension, (n,).

    Returns:
        distance, shape = (batch, num_classes); NaN where either
        centroid is undefined (empty class).
    """
    # centroid (batch, n, num_classes); empty-class mask (batch, num_classes)
    centroid_true, empty_true = get_centroid(mask=mask_true, grid=grid)
    centroid_pred, empty_pred = get_centroid(mask=mask_pred, grid=grid)
    undefined = empty_true | empty_pred
    if spacing is not None:
        # convert voxel coordinates to physical coordinates
        scale = spacing[None, :, None]
        centroid_true = jnp.where(
            undefined[:, None, :], jnp.nan, centroid_true * scale
        )
        centroid_pred = jnp.where(
            undefined[:, None, :], jnp.nan, centroid_pred * scale
        )
    dist = jnp.linalg.norm(centroid_true - centroid_pred, axis=1)
    # NaN for any (sample, class) pair without a defined centroid
    return jnp.where(undefined, jnp.nan, dist)
| 3,160 | 28.542056 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/augmentation.py | """Image augmentation functions."""
from functools import partial
from typing import Callable, Dict, Sequence
import jax
import numpy as np
from jax import numpy as jnp
from jax.scipy.ndimage import map_coordinates
from omegaconf import DictConfig
from imgx import IMAGE, LABEL
from imgx.datasets import FOREGROUND_RANGE, IMAGE_SHAPE_MAP
from imgx.metric.centroid import get_coordinate_grid
def get_2d_rotation_matrix(
    radians: jnp.ndarray,
) -> jnp.ndarray:
    """Return 2d rotation matrix given radians.

    The affine transformation applies as following:
        [x, = [[* * 0]   * [x,
         y,    [* * 0]      y,
         1]    [0 0 1]]     1]

    Args:
        radians: tuple of one values, correspond to xy planes.

    Returns:
        Rotation matrix of shape (3, 3).
    """
    sin = jnp.sin(radians[0])
    cos = jnp.cos(radians[0])
    # 2x2 rotation block in the top-left corner
    rot_block = jnp.array([[cos, -sin], [sin, cos]])
    top_rows = jnp.concatenate([rot_block, jnp.zeros((2, 1))], axis=1)
    bottom_row = jnp.array([[0.0, 0.0, 1.0]])
    return jnp.concatenate([top_rows, bottom_row], axis=0)
def get_3d_rotation_matrix(
    radians: jnp.ndarray,
) -> jnp.ndarray:
    """Return 3d rotation matrix given radians.

    The affine transformation applies as following:
        [x, = [[* * * 0]   * [x,
         y,    [* * * 0]      y,
         z,    [* * * 0]      z,
         1]    [0 0 0 1]]     1]

    Args:
        radians: tuple of three values, correspond to yz, xz, xy planes.

    Returns:
        Rotation matrix of shape (4, 4).
    """
    sin_x, cos_x = jnp.sin(radians[0]), jnp.cos(radians[0])
    sin_y, cos_y = jnp.sin(radians[1]), jnp.cos(radians[1])
    sin_z, cos_z = jnp.sin(radians[2]), jnp.cos(radians[2])
    # rotation of yz plane around x-axis
    rot_x = jnp.array(
        [
            [1.0, 0.0, 0.0, 0.0],
            [0.0, cos_x, -sin_x, 0.0],
            [0.0, sin_x, cos_x, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )
    # rotation of zx plane around y-axis
    rot_y = jnp.array(
        [
            [cos_y, 0.0, sin_y, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [-sin_y, 0.0, cos_y, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )
    # rotation of xy plane around z-axis
    rot_z = jnp.array(
        [
            [cos_z, -sin_z, 0.0, 0.0],
            [sin_z, cos_z, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )
    # compose as Rz @ (Ry @ Rx): x-rotation applied first
    return jnp.matmul(rot_z, jnp.matmul(rot_y, rot_x))
def get_rotation_matrix(
    radians: jnp.ndarray,
) -> jnp.ndarray:
    """Return rotation matrix given radians.

    Args:
        radians: correspond to rotate around each axis;
            one value means 2D, three values mean 3D.

    Returns:
        Rotation matrix of shape (n+1, n+1).

    Raises:
        ValueError: if not 2D or 3D.
    """
    num_angles = radians.size
    if num_angles == 3:
        return get_3d_rotation_matrix(radians)
    if num_angles == 1:
        return get_2d_rotation_matrix(radians)
    raise ValueError("Only support 2D/3D rotations.")
def get_translation_matrix(
    shifts: jnp.ndarray,
) -> jnp.ndarray:
    """Return 3d translation matrix given shifts.

    For example, the 3D affine transformation applies as following:
        [x, = [[1 0 0 *]   * [x,
         y,    [0 1 0 *]      y,
         z,    [0 0 1 *]      z,
         1]    [0 0 0 1]]     1]

    Args:
        shifts: correspond to each axis shift.

    Returns:
        Translation matrix of shape (n+1, n+1).
    """
    ndims = shifts.size
    # identity, with shifts written into the last column (except its
    # bottom-right 1)
    affine = jnp.eye(ndims + 1)
    return affine.at[:-1, -1].set(shifts)
def get_scaling_matrix(
    scales: jnp.ndarray,
) -> jnp.ndarray:
    """Return scaling matrix given scales.

    For example, the 3D affine transformation applies as following:
        [x, = [[* 0 0 0]   * [x,
         y,    [0 * 0 0]      y,
         z,    [0 0 * 0]      z,
         1]    [0 0 0 1]]     1]

    Args:
        scales: correspond to each axis scaling.

    Returns:
        Affine matrix of shape (n+1, n+1).
    """
    # diagonal of per-axis scales plus the homogeneous 1
    return jnp.diag(jnp.append(scales, 1.0))
def get_affine_matrix(
    radians: jnp.ndarray,
    shifts: jnp.ndarray,
    scales: jnp.ndarray,
) -> jnp.ndarray:
    """Return an affine matrix from parameters.

    For example, the 3D affine transformation applies as following:
        [x, = [[* * * *]   * [x,
         y,    [* * * *]      y,
         z,    [* * * *]      z,
         1]    [0 0 0 1]]     1]

    Args:
        radians: correspond to rotate around each axis.
        shifts: correspond to each axis shift.
        scales: correspond to each axis scaling.

    Returns:
        Affine matrix of shape (n+1, n+1).
    """
    rotation = get_rotation_matrix(radians)
    translation = get_translation_matrix(shifts)
    scaling = get_scaling_matrix(scales)
    # rotate first, then scale, then translate
    return translation @ (scaling @ rotation)
def batch_get_random_affine_matrix(
    key: jax.random.PRNGKey,
    max_rotation: jnp.ndarray,
    min_translation: jnp.ndarray,
    max_translation: jnp.ndarray,
    max_scaling: jnp.ndarray,
) -> jnp.ndarray:
    """Get a batch of random affine matrices.

    Args:
        key: jax random key.
        max_rotation: maximum rotation in radians.
        min_translation: minimum translation in pixel/voxels.
        max_translation: maximum translation in pixel/voxels.
        max_scaling: maximum scaling difference in pixel/voxels.

    Returns:
        Affine matrix of shape (batch, n+1, n+1).
    """
    key_rot, key_shift, key_scale = jax.random.split(key, num=3)
    # rotation angles sampled symmetrically around zero
    radians = jax.random.uniform(
        key_rot,
        shape=max_rotation.shape,
        minval=-max_rotation,
        maxval=max_rotation,
    )
    shifts = jax.random.uniform(
        key_shift,
        shape=max_translation.shape,
        minval=min_translation,
        maxval=max_translation,
    )
    # scaling factors sampled around 1.0
    scales = jax.random.uniform(
        key_scale,
        shape=max_scaling.shape,
        minval=1.0 - max_scaling,
        maxval=1.0 + max_scaling,
    )
    # vmap over the leading batch axis of all three parameter arrays
    return jax.vmap(get_affine_matrix)(radians, shifts, scales)
def apply_affine_to_grid(
    grid: jnp.ndarray, affine_matrix: jnp.ndarray
) -> jnp.ndarray:
    """Apply affine matrix to grid.

    The grid has non-negative coordinates, means the origin is at a corner.
    Need to shift the grid such that the origin is at center,
    then apply affine, then shift the origin back.

    Args:
        grid: grid coordinates, of shape (n, d1, ..., dn).
            grid[:, i1, ..., in] = [i1, ..., in]
        affine_matrix: shape (n+1, n+1)

    Returns:
        Grid with updated coordinates, shape (n, d1, ..., dn).
    """
    # homogeneous coordinates, (n+1, d1, ..., dn)
    homogeneous = jnp.concatenate(
        [grid, jnp.ones((1,) + grid.shape[1:])], axis=0
    )
    # translation placing the origin at the volume center
    half_extent = (jnp.array(grid.shape[1:]) - 1) / 2
    to_center = get_translation_matrix(-half_extent)  # (n+1, n+1)
    # (n+1, n+1) * (n+1, d1, ..., dn) = (n+1, d1, ..., dn)
    homogeneous = jnp.einsum("ji,i...->j...", to_center, homogeneous)
    # apply the affine around the center
    homogeneous = jnp.einsum("ji,i...->j...", affine_matrix, homogeneous)
    # shift back to the corner, dropping the homogeneous row: (n, n+1)
    from_center = get_translation_matrix(half_extent)[:-1, :]
    # (n, n+1) * (n+1, d1, ..., dn) = (n, d1, ..., dn)
    return jnp.einsum("ji,i...->j...", from_center, homogeneous)
def batch_apply_affine_to_grid(
    grid: jnp.ndarray, affine_matrix: jnp.ndarray
) -> jnp.ndarray:
    """Apply batch of affine matrix to grid.

    Args:
        grid: grid coordinates, of shape (n, d1, ..., dn).
            grid[:, i1, ..., in] = [i1, ..., in]
        affine_matrix: shape (batch, n+1, n+1).

    Returns:
        Grid with updated coordinates, shape (batch, n, d1, ..., dn).
    """
    # grid is shared across the batch, affine matrices are batched
    batched_apply = jax.vmap(apply_affine_to_grid, in_axes=(None, 0))
    return batched_apply(grid, affine_matrix)
def batch_resample_image_label(
    input_dict: Dict[str, jnp.ndarray],
    grid: jnp.ndarray,
) -> Dict[str, jnp.ndarray]:
    """Apply batch of affine matrix to image and label.

    Args:
        input_dict: dict having image and label.
            image shape (batch, d1, ..., dn)
        grid: grid coordinates, of shape (batch, n, d1, ..., dn).

    Returns:
        Updated image and label, of same shape.
    """

    def _batched_resampler(order: int) -> Callable:
        """Build a per-batch resampler with the given interpolation order."""
        return jax.vmap(
            partial(
                map_coordinates,
                order=order,
                mode="constant",
                cval=0.0,
            ),
            in_axes=(0, 0),
        )

    # linear interpolation for intensities, nearest for labels
    image = _batched_resampler(1)(input_dict[IMAGE], grid)
    label = _batched_resampler(0)(input_dict[LABEL], grid)
    return {IMAGE: image, LABEL: label}
def batch_random_affine_transform(
    key: jax.random.PRNGKey,
    input_dict: Dict[str, jnp.ndarray],
    grid: jnp.ndarray,
    max_rotation: jnp.ndarray,
    max_translation: jnp.ndarray,
    max_scaling: jnp.ndarray,
) -> Dict[str, jnp.ndarray]:
    """Randomly rotate/translate/scale a batch; keep image and label only.

    Translation bounds are clipped per sample using the label's foreground
    range so that the shift cannot push any foreground voxel outside the
    volume (which would remove classes from the label).

    TODO: image does not have channel.

    Args:
        key: jax random key.
        input_dict: dict having image, label and foreground_range.
            image shape (batch, d1, ..., dn)
        grid: grid coordinates, of shape (n, d1, ..., dn).
            grid[:, i1, ..., in] = [i1, ..., in]
        max_rotation: maximum rotation in radians, shape = (batch, ...).
        max_translation: maximum translation in pixel/voxels,
            shape = (batch, ...).
        max_scaling: maximum scaling difference in pixel/voxels,
            shape = (batch, ...).

    Returns:
        Augmented dict having image and label.
        image and label all have shapes (batch, d1, ..., dn).
    """
    # tile per-axis limits so each sample in the batch has its own bounds
    # (batch, ...)
    batch_size = input_dict[IMAGE].shape[0]
    max_rotation = jnp.tile(max_rotation[None, ...], (batch_size, 1))
    max_translation = jnp.tile(max_translation[None, ...], (batch_size, 1))
    # NOTE: min_translation is intentionally derived from the un-refined
    # max_translation; both bounds are then refined independently below.
    min_translation = -max_translation
    max_scaling = jnp.tile(max_scaling[None, ...], (batch_size, 1))
    # refine translation to avoid remove classes
    # FOREGROUND_RANGE holds per-axis [min, max] foreground indices,
    # so the shift keeps the whole foreground inside [0, shape-1].
    shape = jnp.array(input_dict[LABEL].shape[1:])
    shape = jnp.tile(shape[None, ...], (batch_size, 1))
    max_translation = jnp.minimum(
        max_translation, shape - 1 - input_dict[FOREGROUND_RANGE][..., -1]
    )
    min_translation = jnp.maximum(
        min_translation, -input_dict[FOREGROUND_RANGE][..., 0]
    )
    # one random affine per sample, (batch, n+1, n+1)
    affine_matrix = batch_get_random_affine_matrix(
        key=key,
        max_rotation=max_rotation,
        min_translation=min_translation,
        max_translation=max_translation,
        max_scaling=max_scaling,
    )
    # transformed sampling coordinates, (batch, n, d1, ..., dn)
    grid = batch_apply_affine_to_grid(grid=grid, affine_matrix=affine_matrix)
    return batch_resample_image_label(
        input_dict=input_dict,
        grid=grid,
    )
def build_aug_fn_from_fns(
    fns: Sequence[Callable],
) -> Callable:
    """Combine a list of data augmentation functions.

    Args:
        fns: augmentation functions, each taking (key, input_dict)
            and returning an updated dict.

    Returns:
        A data augmentation function applying all of them in order.
    """

    def chained_aug_fn(
        key: jax.random.PRNGKey, input_dict: Dict[str, jnp.ndarray]
    ) -> Dict[str, jnp.ndarray]:
        """Apply each augmentation with its own derived random key."""
        subkeys = jax.random.split(key, num=len(fns))
        for subkey, fn in zip(subkeys, fns):
            input_dict = fn(subkey, input_dict)
        return input_dict

    return chained_aug_fn
def build_aug_fn_from_config(
    config: DictConfig,
) -> Callable:
    """Return a data augmentation function.

    Args:
        config: entire config.

    Returns:
        A data augmentation function.
    """
    data_config = config.data
    dataset_name = data_config["name"]
    da_config = data_config[dataset_name]["data_augmentation"]
    # precompute the sampling grid for the dataset's image shape
    grid = get_coordinate_grid(shape=IMAGE_SHAPE_MAP[dataset_name])
    affine_fn = partial(
        batch_random_affine_transform,
        grid=grid,
        max_rotation=np.array(da_config["max_rotation"]),
        max_translation=np.array(da_config["max_translation"]),
        max_scaling=np.array(da_config["max_scaling"]),
    )
    aug_fns = [affine_fn]
    # chaining only needed when more than one augmentation is configured
    if len(aug_fns) > 1:
        return build_aug_fn_from_fns(aug_fns)
    return aug_fns[0]
| 12,812 | 26.793926 | 77 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/iterator.py | """Dataset related classes and functions."""
from functools import partial
from typing import Callable, Dict, Iterator, Optional, Tuple
import jax
import jax.numpy as jnp
import jax.scipy
import jmp
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import logging
from omegaconf import DictConfig
from imgx import IMAGE, LABEL, TEST_SPLIT, TRAIN_SPLIT, VALID_SPLIT
from imgx.datasets import FOREGROUND_RANGE, Dataset
from imgx.datasets.util import (
get_foreground_range,
maybe_pad_batch,
tf_to_numpy,
)
from imgx.device import shard
def create_image_label_dict_from_dict(
    x: Dict[str, tf.Tensor],
) -> Dict[str, tf.Tensor]:
    """Create a dict from inputs.

    Keeps image and label, and adds the label's foreground range
    (used later to bound augmentation translations).

    Args:
        x: dict having image, label, and other tensors.

    Returns:
        Dict having image, label and foreground range.
    """
    label = x[LABEL]
    return {
        IMAGE: x[IMAGE],
        LABEL: label,
        FOREGROUND_RANGE: get_foreground_range(label),
    }
def load_split_from_image_tfds_builder(
    builder: tfds.core.DatasetBuilder,
    batch_size: int,
    split: str,
    augment_train_example_fn: Optional[Callable] = None,
    shuffle_buffer_size: Optional[int] = None,
    shuffle_seed: int = 0,
    max_num_samples: int = -1,
    dtype: jnp.dtype = jnp.float32,
) -> Tuple[tf.data.Dataset, int]:
    """Loads a split from a TensorFlow Dataset compatible builder.

    The pipeline order is deliberate: take -> cache -> repeat ->
    augment -> shuffle -> batch for training, so caching happens once
    while augmentation and shuffling stay random per epoch.

    https://github.com/google-research/scenic/blob/main/scenic/dataset_lib/dataset_utils.py

    Args:
        builder: A TFDS compatible dataset builder.
        batch_size: The batch size returned by the data pipeline.
        split: Name of the split to be loaded.
        augment_train_example_fn: A function that given a train example
            returns the augmented example. Note that this function is applied
            AFTER caching and repeat to get true randomness.
        shuffle_buffer_size: Size of the tf.data.dataset shuffle buffer.
        shuffle_seed: Seed for shuffling the training data.
        max_num_samples: maximum number of samples to consider.
        dtype: data type for images.

    Returns:
        - A repeated dataset.
        - Number of steps after batch if the dataset is not repeated,
          returns -1 for training.
    """
    is_train = split == TRAIN_SPLIT
    # Prepare arguments.
    shuffle_buffer_size = shuffle_buffer_size or (8 * batch_size)
    # Download data.
    builder.download_and_prepare()
    # Each host is responsible for a fixed subset of data.
    # NOTE: `split` is re-assigned to this host's shard; `is_train` was
    # computed from the original split name above.
    if is_train:
        split = tfds.even_splits(split, jax.process_count())[
            jax.process_index()
        ]
    dataset = builder.as_dataset(
        split=split,
        shuffle_files=False,
    )
    # Shrink data set if required
    if max_num_samples > 0:
        logging.info(
            f"Taking first {max_num_samples} data samples for split {split}."
        )
        dataset = dataset.take(max_num_samples)
    # Caching.
    dataset = dataset.cache()
    num_steps = -1  # not set for training
    if is_train:
        # First repeat then batch.
        dataset = dataset.repeat()
        # Augmentation should be done after repeat for true randomness.
        if augment_train_example_fn:
            dataset = dataset.map(
                augment_train_example_fn,
                num_parallel_calls=tf.data.experimental.AUTOTUNE,
            )
        # Shuffle after augmentation to avoid loading non-augmented images into
        # buffer.
        dataset = dataset.shuffle(shuffle_buffer_size, seed=shuffle_seed)
        dataset = dataset.batch(batch_size, drop_remainder=True)
    else:
        # First batch then repeat.
        dataset = dataset.batch(batch_size, drop_remainder=False)
        # number of batches in one pass over the (unbatched) split
        num_steps = tf.data.experimental.cardinality(dataset).numpy()
        if split == VALID_SPLIT:
            # repeat dataset for validation
            dataset = dataset.repeat()
    # NOTE: You may be tempted to move the casting earlier on in the pipeline,
    # but for bf16 some operations will end up silently placed on the TPU and
    # this causes stalls while TF and JAX battle for the accelerator.
    if dtype != jnp.float32:

        def cast_fn(batch: Dict[str, jnp.ndarray]) -> Dict[str, jnp.ndarray]:
            # cast only the image; labels keep their integer dtype
            batch[IMAGE] = tf.cast(batch[IMAGE], tf.dtypes.as_dtype(dtype))
            return batch

        dataset = dataset.map(cast_fn)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset, num_steps
def get_image_iterator(
    builder: tfds.core.DatasetBuilder,
    split: str,
    is_train: bool,
    batch_size_per_replica: int,
    num_replicas: int,
    shuffle_seed: int,
    max_num_samples: int,
    dtype: jnp.dtype = jnp.float32,
) -> Tuple[Iterator, int]:
    """Returns iterator from builder.

    Args:
        builder: data set builder.
        split: split name.
        is_train: if the split is for training.
        batch_size_per_replica: Number of samples consumed per model per step.
        num_replicas: number of model replicas.
        shuffle_seed: Seed for shuffling the training data.
        max_num_samples: maximum number of samples in iterator.
        dtype: data type for images.

    Returns:
        - Batch iterator.
        - Number of steps after batch if the dataset is not repeated,
          returns -1 for training.
    """
    total_batch_size = batch_size_per_replica * num_replicas
    dataset, num_steps = load_split_from_image_tfds_builder(
        builder=builder,
        batch_size=total_batch_size,
        split=split,
        shuffle_seed=shuffle_seed,
        augment_train_example_fn=create_image_label_dict_from_dict,
        max_num_samples=max_num_samples,
        dtype=dtype,
    )
    dataset_iter = iter(dataset)
    # lazily post-process each batch:
    # tf -> numpy, pad partial eval batches, shard across replicas
    for transform in (
        tf_to_numpy,
        partial(maybe_pad_batch, is_train=is_train, batch_size=total_batch_size),
        partial(shard, num_replicas=num_replicas),
    ):
        dataset_iter = map(transform, dataset_iter)
    return dataset_iter, num_steps
def get_image_tfds_dataset(
    dataset_name: str,
    config: DictConfig,
) -> Dataset:
    """Returns generators for the dataset train, valid, and test sets.

    Args:
        dataset_name: Data set name.
        config: entire config.

    Returns:
        A Dataset() which includes train_iter, valid_iter, and test_iter.
    """
    batch_size_per_replica = config["training"]["batch_size_per_replica"]
    num_devices_per_replica = config["training"]["num_devices_per_replica"]
    num_replicas = jax.local_device_count() // num_devices_per_replica
    shuffle_seed = config["seed"]
    max_num_samples = config["data"]["max_num_samples"]
    dtype = jnp.float32
    if config["training"]["mixed_precision"]["use"]:
        dtype = jmp.half_dtype()
    builder = tfds.builder(dataset_name)

    def _split_iterator(split: str, is_train: bool) -> Tuple[Iterator, int]:
        """Build one split's iterator, sharing all other settings."""
        return get_image_iterator(
            builder=builder,
            split=split,
            is_train=is_train,
            batch_size_per_replica=batch_size_per_replica,
            num_replicas=num_replicas,
            shuffle_seed=shuffle_seed,
            max_num_samples=max_num_samples,
            dtype=dtype,
        )

    train_iter, _ = _split_iterator(TRAIN_SPLIT, True)
    valid_iter, num_valid_steps = _split_iterator(VALID_SPLIT, False)
    test_iter, num_test_steps = _split_iterator(TEST_SPLIT, False)
    return Dataset(
        train_iter=train_iter,
        valid_iter=valid_iter,
        test_iter=test_iter,
        num_valid_steps=num_valid_steps,
        num_test_steps=num_test_steps,
    )
| 8,041 | 31.297189 | 91 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/util.py | """Util functions for image.
Some are adapted from
https://github.com/google-research/scenic/blob/03735eb81f64fd1241c4efdb946ea6de3d326fe1/scenic/dataset_lib/dataset_utils.py
"""
import functools
import queue
import threading
from typing import Any, Callable, Dict, Generator, Iterable, Tuple
import chex
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
from absl import logging
from imgx import IMAGE
def maybe_pad_batch(
    batch: Dict[str, chex.ArrayTree],
    is_train: bool,
    batch_size: int,
    batch_dim: int = 0,
) -> Dict[str, chex.ArrayTree]:
    """Zero pad the batch on the right to the batch_size.

    All leaf tensors in the batch pytree are padded along `batch_dim` so the
    batch dimension reaches `batch_size`. Training batches must already be
    complete (the train pipeline drops the last partial batch), so they are
    returned unchanged; only partial eval/test batches are padded.

    Args:
        batch: A dictionary containing a pytree; the tensor under the image
            key determines the current batch size.
        is_train: if the batch is from the training data. In that case,
            we drop the last (incomplete) batch and thus don't do any padding.
        batch_size: All arrays in the dict will be padded to have first
            dimension equal to desired_batch_size.
        batch_dim: Batch dimension. The default is 0, but it can be different
            if a sharded batch is given.

    Returns:
        A dictionary mapping the same keys to the padded batches.

    Raises:
        ValueError: if a training batch is incomplete.
    """
    num_missing = batch_size - batch[IMAGE].shape[batch_dim]
    if is_train and num_missing != 0:
        raise ValueError(
            "In this codebase, we assumed that we always drop the "
            "last partial batch of the train set. Please use "
            "` drop_remainder=True` for the training set."
        )
    # Most batches do not need padding, so we quickly return to avoid slowdown.
    if is_train or num_missing == 0:
        return batch

    def _pad_right(array: np.ndarray) -> np.ndarray:
        """Zero-pad one leaf along the batch dimension only."""
        widths = [(0, 0)] * array.ndim
        widths[batch_dim] = (0, num_missing)
        return np.pad(array, widths, mode="constant")

    return jax.tree_map(_pad_right, batch)
def unpad(
    pytree: chex.ArrayTree,
    num_samples: int,
) -> chex.ArrayTree:
    """Remove padded data for all arrays in the pytree.

    All arrays are assumed to share the same leading (batch) dimension.

    Args:
        pytree: A pytree of arrays to be sharded.
        num_samples: number of samples to keep.

    Returns:
        Data without padding
    """
    # keep only the first num_samples entries of every leaf
    return jax.tree_map(lambda leaf: leaf[:num_samples, ...], pytree)
def tf_to_numpy(batch: Dict) -> np.ndarray:
    """Convert an input batch from tf Tensors to numpy arrays.

    Args:
        batch: A dictionary that has items in a batch: image and labels.

    Returns:
        Numpy arrays of the given tf Tensors.
    """

    def _zero_copy(tensor: tf.Tensor) -> np.ndarray:
        """Zero-copy conversion from a tf Tensor to a numpy array."""
        return tensor._numpy()  # pylint: disable=protected-access

    return jax.tree_map(_zero_copy, batch)
def get_center_pad_shape(
    current_shape: Tuple[int, ...], target_shape: Tuple[int, ...]
) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
    """Get pad sizes for sitk.ConstantPad.

    The padding is added symmetrically; odd leftovers go to the upper side.

    Args:
        current_shape: current shape of the image.
        target_shape: target shape of the image.

    Returns:
        pad_lower: shape to pad on the lower side.
        pad_upper: shape to pad on the upper side.
    """
    lower = []
    upper = []
    for current, target in zip(current_shape, target_shape):
        # never pad negatively when already large enough
        total = max(target - current, 0)
        half = total // 2
        lower.append(half)
        upper.append(total - half)
    return tuple(lower), tuple(upper)
def get_center_crop_shape(
    current_shape: Tuple[int, ...], target_shape: Tuple[int, ...]
) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
    """Get crop sizes for sitk.Crop.

    The crop is performed symmetrically; odd leftovers go to the upper side.

    Args:
        current_shape: current shape of the image.
        target_shape: target shape of the image.

    Returns:
        crop_lower: shape to crop on the lower side.
        crop_upper: shape to crop on the upper side.
    """
    lower = []
    upper = []
    for current, target in zip(current_shape, target_shape):
        # never crop negatively when already small enough
        total = max(current - target, 0)
        half = total // 2
        lower.append(half)
        upper.append(total - half)
    return tuple(lower), tuple(upper)
def try_to_get_center_crop_shape(
    label_min: int, label_max: int, current_length: int, target_length: int
) -> Tuple[int, int]:
    """Try to crop at the center of label, 1D.

    The crop window is centered on the label where possible; when that would
    fall outside the image, the window is shifted back inside.

    Args:
        label_min: label index minimum, inclusive.
        label_max: label index maximum, exclusive.
        current_length: current image length.
        target_length: target image length.

    Returns:
        crop_lower: shape to crop on the lower side.
        crop_upper: shape to crop on the upper side.

    Raises:
        ValueError: if label min max is out of range.
    """
    if label_min < 0 or label_max > current_length:
        raise ValueError("Label index out of range.")
    if current_length <= target_length:
        # image already fits, nothing to crop
        return 0, 0
    # ideal window centered on the label
    label_center = (label_min + label_max - 1) / 2.0
    window_start = int(np.ceil(label_center - target_length / 2.0))
    window_end = window_start + target_length
    # shift the window right if it underflows the image
    window_start = max(window_start, 0)
    # shift the window left if it overflows the image
    if window_end > current_length:
        window_start -= window_end - current_length
    crop_lower = window_start
    crop_upper = current_length - target_length - crop_lower
    return crop_lower, crop_upper
def get_center_crop_shape_from_bbox(
    bbox_min: Tuple[int, ...],
    bbox_max: Tuple[int, ...],
    current_shape: Tuple[int, ...],
    target_shape: Tuple[int, ...],
) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
    """Get crop sizes for sitk.Crop from label bounding box.

    The crop is not necessarily performed symmetrically.

    Args:
        bbox_min: [start_in_1st_spatial_dim, ...], inclusive, starts at zero.
        bbox_max: [end_in_1st_spatial_dim, ...], exclusive, starts at zero.
        current_shape: current shape of the image.
        target_shape: target shape of the image.

    Returns:
        crop_lower: shape to crop on the lower side.
        crop_upper: shape to crop on the upper side.
    """
    # solve each axis independently, centered on that axis' label range
    per_axis = [
        try_to_get_center_crop_shape(
            label_min=bbox_min[i],
            label_max=bbox_max[i],
            current_length=length,
            target_length=target_shape[i],
        )
        for i, length in enumerate(current_shape)
    ]
    crop_lower = tuple(pair[0] for pair in per_axis)
    crop_upper = tuple(pair[1] for pair in per_axis)
    return crop_lower, crop_upper
def get_foreground_range(label: tf.Tensor) -> tf.Tensor:
    """Get the per-axis foreground index range of a label.

    This function is not defined in jax for augmentation because
    nonzero is not jittable as the number of nonzero elements is unknown.

    Args:
        label: shape (d1, ..., dn), here n = ndim below.

    Returns:
        shape (ndim, 2), for each dimension, it's [min, max].
    """
    # each row holds the indices of foreground voxels along one axis
    # (ndim, num_nonzero)
    foreground_indices = tnp.stack(tnp.nonzero(label))
    lower = tnp.min(foreground_indices, axis=-1)
    upper = tnp.max(foreground_indices, axis=-1)
    # (ndim, 2)
    return tnp.stack([lower, upper], axis=-1)
def get_function_name(function: Callable[..., Any]) -> str:
    """Get a readable name for any function.

    This is used on logging paths (e.g. the prefetch producer's error
    handler), so it must never raise itself. It therefore:
    - recurses into ``functools.partial`` objects (handles partial
      subclasses, which CPython does not flatten automatically), and
    - falls back to ``repr`` for callables without ``__name__``
      (e.g. callable class instances), instead of raising AttributeError.

    Args:
        function: function to query.

    Returns:
        function name, with partials rendered as "partial(name)".
    """
    if isinstance(function, functools.partial):
        # recurse so wrapped callables are unwrapped instead of crashing
        return f"partial({get_function_name(function.func)})"
    return getattr(function, "__name__", repr(function))
def py_prefetch(
    iterable_function: Callable[[], Iterable[chex.ArrayTree]],
    buffer_size: int = 5,
) -> Generator[chex.ArrayTree, None, None]:
    """Performs prefetching of elements from an iterable in a separate thread.

    A daemon producer thread builds the iterable and pushes items into a
    bounded queue while this generator consumes them, so data preparation
    overlaps with downstream computation.

    Args:
        iterable_function: A python function that when called with no arguments
            returns an iterable. This is used to build a fresh iterable for each
            thread (crucial if working with tensorflow datasets because
            `tf.graph` objects are thread local).
        buffer_size (int): Number of elements to keep in the prefetch buffer.

    Yields:
        Prefetched elements from the original iterable.

    Raises:
        ValueError: if the buffer_size <= 1.
        Any error thrown by the iterable_function. Note this is not
            raised inside the producer, but after it finishes executing.
    """
    if buffer_size <= 1:
        raise ValueError("the buffer_size should be > 1")
    # the consumer's in-flight element counts towards buffer_size,
    # hence the queue itself holds one fewer item
    buffer: queue.Queue = queue.Queue(maxsize=(buffer_size - 1))
    # producer stores its exception here so it can be re-raised by the consumer
    producer_error = []
    # unique sentinel marking end-of-stream; compared with `is` below so it
    # cannot collide with any real item
    end = object()

    def producer() -> None:
        """Enqueues items from iterable on a given thread."""
        try:
            # Build a new iterable for each thread. This is crucial if
            # working with tensorflow datasets
            # because tf.graph objects are thread local.
            iterable = iterable_function()
            for item in iterable:
                buffer.put(item)
        except Exception as err:  # pylint: disable=broad-except
            logging.exception(
                "Error in producer thread for %s",
                get_function_name(iterable_function),
            )
            producer_error.append(err)
        finally:
            # always signal completion, even on failure
            buffer.put(end)

    threading.Thread(target=producer, daemon=True).start()
    # Consumer.
    # NOTE(review): if this generator is abandoned before exhaustion, the
    # producer may block forever on buffer.put — confirm callers drain it.
    while True:
        value = buffer.get()
        if value is end:
            break
        yield value
    if producer_error:
        raise producer_error[0]
| 11,835 | 32.247191 | 123 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/__init__.py | """Dataset module to build tensorflow datasets."""
from collections import namedtuple
from pathlib import Path
from imgx.datasets.amos_ct.amos_ct_dataset_builder import (
AMOS_CT_IMAGE_SHAPE,
AMOS_CT_IMAGE_SPACING,
AMOS_NUM_CLASSES,
AMOS_TFDS_FOLD,
)
from imgx.datasets.male_pelvic_mr.male_pelvic_mr_dataset_builder import (
PELVIC_IMAGE_SHAPE,
PELVIC_IMAGE_SPACING,
PELVIC_NUM_CLASSES,
PELVIC_TFDS_FOLD,
)
# Container bundling the per-split iterators and evaluation step counts.
Dataset = namedtuple(
    "Dataset",
    [
        "train_iter",
        "valid_iter",
        "test_iter",
        # valid_iter is repeated, needs to know how many batches to eval
        "num_valid_steps",
        # for simplicity, better to know how many batches to eval
        "num_test_steps",
    ],
)
# default root folder used by tensorflow_datasets
DIR_TFDS: Path = Path.home() / "tensorflow_datasets"
# segmentation task: key for the precomputed foreground index range
FOREGROUND_RANGE = "foreground_range"
# supported datasets
MALE_PELVIC_MR = "male_pelvic_mr"
AMOS_CT = "amos_ct"
# dataset name -> voxel spacing (dx, dy, dz) used at preprocessing
IMAGE_SPACING_MAP = {
    MALE_PELVIC_MR: PELVIC_IMAGE_SPACING,
    AMOS_CT: AMOS_CT_IMAGE_SPACING,
}
# dataset name -> folder holding the preprocessed nifti files
DIR_TFDS_PROCESSED_MAP = {
    MALE_PELVIC_MR: DIR_TFDS
    / "downloads"
    / "extracted"
    / PELVIC_TFDS_FOLD
    / "preprocessed",
    AMOS_CT: DIR_TFDS
    / "downloads"
    / "extracted"
    / AMOS_TFDS_FOLD
    / "preprocessed",
}
# dataset name -> image shape (width, height, depth) after preprocessing
IMAGE_SHAPE_MAP = {
    MALE_PELVIC_MR: PELVIC_IMAGE_SHAPE,
    AMOS_CT: AMOS_CT_IMAGE_SHAPE,
}
# dataset name -> number of classes including background
NUM_CLASSES_MAP = {
    MALE_PELVIC_MR: PELVIC_NUM_CLASSES,
    AMOS_CT: AMOS_NUM_CLASSES,
}
| 1,471 | 22 | 73 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/preprocess.py | """Preprocess functions using sitk."""
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
import SimpleITK as sitk # noqa: N813
from imgx.datasets.util import (
get_center_crop_shape_from_bbox,
get_center_pad_shape,
)
from imgx.metric.surface_distance import get_binary_mask_bounding_box
def check_image_and_label(
    image_volume: sitk.Image,
    label_volume: sitk.Image,
    image_path: Path,
    label_path: Path,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-3,
) -> None:
    """Check if metadata matches between image and label.

    Size must match exactly; spacing/direction/origin are compared with
    floating point tolerances since nifti headers may differ by rounding
    noise.

    Args:
        image_volume: loaded image.
        label_volume: loaded label.
        image_path: image file path, used in error messages only.
        label_path: label file path, used in error messages only.
        rtol: relative tolerance for the np.allclose checks.
        atol: absolute tolerance for the np.allclose checks
            (deliberately looser than numpy's 1e-8 default).

    Raises:
        ValueError: if image and label metadata does not match
    """
    # voxel grid size must match exactly
    if image_volume.GetSize() != label_volume.GetSize():
        raise ValueError(
            f"Image and label sizes are not the same for "
            f"{image_path} and {label_path}: "
            f"{image_volume.GetSize()} and {label_volume.GetSize()}."
        )
    # voxel spacing compared with tolerance
    if not np.allclose(
        image_volume.GetSpacing(),
        label_volume.GetSpacing(),
        rtol=rtol,
        atol=atol,
    ):
        raise ValueError(
            f"Image and label spacing are not the same for "
            f"{image_path} and {label_path}: "
            f"{image_volume.GetSpacing()} and {label_volume.GetSpacing()}."
        )
    # orientation (direction cosine matrix) compared with tolerance
    if not np.allclose(
        image_volume.GetDirection(),
        label_volume.GetDirection(),
        rtol=rtol,
        atol=atol,
    ):
        # include the element-wise difference to ease debugging
        arr_image = np.array(image_volume.GetDirection())
        arr_label = np.array(label_volume.GetDirection())
        raise ValueError(
            f"Image and label direction are not the same for "
            f"{image_path} and {label_path}: "
            f"{arr_image} and {arr_label}, "
            f"difference is {arr_image - arr_label} for "
            f"rtol={rtol} and atol = {atol}."
        )
    # physical origin compared with tolerance
    if not np.allclose(
        image_volume.GetOrigin(), label_volume.GetOrigin(), rtol=rtol, atol=atol
    ):
        raise ValueError(
            f"Image and label origin are not the same for "
            f"{image_path} and {label_path}: "
            f"{image_volume.GetOrigin()} and {label_volume.GetOrigin()}."
        )
def resample(
    volume: sitk.Image, is_label: bool, target_spacing: Tuple[float, ...]
) -> sitk.Image:
    """Resample volume to the target spacing.

    The represented physical extent is preserved; only the voxel grid
    changes.

    Args:
        volume: volume to resample.
        is_label: True if it represents a label,
            thus nearest neighbour for interpolation.
        target_spacing: target voxel size per axis.

    Returns:
        Resampled volume.
    """
    original_spacing = volume.GetSpacing()
    original_shape = volume.GetSize()
    original_center = volume.GetOrigin()
    # calculate shape after resampling
    # round to integers to be robust
    # otherwise, ceiling is sensitive to spacing
    resample_target_shape = tuple(
        int(np.round(orig_sh * orig_sp / trg_sp))
        for orig_sh, orig_sp, trg_sp in zip(
            original_shape, original_spacing, target_spacing
        )
    )
    # labels must not be interpolated between class ids
    interpolator = sitk.sitkNearestNeighbor if is_label else sitk.sitkLinear
    # No transform because we do not want to change the represented
    # physical size of the objects in the image
    transform = sitk.Transform()
    # The origin is middle of the voxel/pixel
    # https://simpleitk.readthedocs.io/en/master/fundamentalConcepts.html
    # NOTE(review): this half-voxel origin shift assumes an axis-aligned
    # direction matrix — confirm behaviour for rotated volumes.
    target_center = [
        x + 0.5 * (target_spacing[i] - original_spacing[i])
        for i, x in enumerate(original_center)
    ]
    # Do not rotate
    target_direction = volume.GetDirection()
    volume = sitk.Resample(
        volume,
        size=resample_target_shape,
        transform=transform,
        interpolator=interpolator,
        outputOrigin=target_center,
        outputSpacing=target_spacing,
        outputDirection=target_direction,
        defaultPixelValue=0,
        outputPixelType=volume.GetPixelID(),
        useNearestNeighborExtrapolator=False,
    )
    return volume
def load_and_preprocess_image_and_label(
    uid: str,
    image_path: Path,
    label_path: Path,
    out_dir: Path,
    target_spacing: Tuple[float, float, float],
    target_shape: Tuple[int, int, int],
    intensity_range: Optional[Tuple[float, float]],
) -> Tuple[np.ndarray, np.ndarray]:
    """Load image and perform resampling/padding/cropping using SimpleITK.

    Pipeline: load -> metadata check -> resample to target spacing ->
    centre pad -> label-centred crop -> intensity clip/normalise -> save.
    This function also saves the processed image/masks.

    https://examples.itk.org/src/filtering/imagegrid/resampleanimage/documentation

    Args:
        uid: unique id for the image.
        image_path: file path of Nifti file for image.
        label_path: file path of Nifti file for label.
        out_dir: directory to save preprocessed files.
        target_spacing: size of each voxel, of shape (dx, dy, dz),
        target_shape: size of image, of shape (width, height, depth).
        intensity_range: image intensity range before normalisation
            (e.g. a Hounsfield-unit window for CT); None disables clipping.

    Returns:
        image array, of shape (width, height, depth).
        label array, of shape (width, height, depth).

    Raises:
        ValueError: if shapes mismatch after resampling or cropping.
    """
    # load
    image_volume = sitk.ReadImage(str(image_path))
    label_volume = sitk.ReadImage(str(label_path))
    # metadata should be the same
    check_image_and_label(
        image_volume=image_volume,
        label_volume=label_volume,
        image_path=image_path,
        label_path=label_path,
    )
    # resample
    image_volume = resample(
        volume=image_volume, is_label=False, target_spacing=target_spacing
    )
    label_volume = resample(
        volume=label_volume, is_label=True, target_spacing=target_spacing
    )
    if image_volume.GetSize() != label_volume.GetSize():
        raise ValueError(
            f"After resampling image and label does not match: "
            f"image = {image_volume.GetSize()} "
            f"label = {label_volume.GetSize()}."
        )
    # pad if the size is smaller than target
    # center-padding can be calculated on image or label
    pad_lower, pad_upper = get_center_pad_shape(
        current_shape=label_volume.GetSize(), target_shape=target_shape
    )
    image_volume = sitk.ConstantPad(image_volume, pad_lower, pad_upper, 0)
    label_volume = sitk.ConstantPad(label_volume, pad_lower, pad_upper, 0)
    # crop if the size is larger than target
    # crop is calculated on labels to ensure maximum area of labels
    # sitk arrays are (depth, height, width); transpose to (width, height, depth)
    label_array = sitk.GetArrayFromImage(label_volume)
    label_array = np.transpose(label_array, axes=[2, 1, 0])
    bbox_min, bbox_max = get_binary_mask_bounding_box(mask=label_array)
    crop_lower, crop_upper = get_center_crop_shape_from_bbox(
        bbox_min=bbox_min,
        bbox_max=bbox_max,
        current_shape=label_volume.GetSize(),
        target_shape=target_shape,
    )
    image_volume = sitk.Crop(image_volume, crop_lower, crop_upper)
    label_volume = sitk.Crop(label_volume, crop_lower, crop_upper)
    # check shape
    if image_volume.GetSize() != target_shape:
        raise ValueError(
            f"After resampling/padding/cropping, image shape "
            f"{image_volume.GetSize()} is wrong for "
            f"{image_path}"
        )
    if label_volume.GetSize() != target_shape:
        raise ValueError(
            f"After resampling/padding/cropping, label shape "
            f"{label_volume.GetSize()} is wrong for "
            f"{label_path}"
        )
    # clip intensity if configured
    if intensity_range is not None:
        image_volume = sitk.Clamp(
            image_volume,
            lowerBound=intensity_range[0],
            upperBound=intensity_range[1],
        )
    # for image, normalise the intensity
    # Normalize gives zero mean / unit variance, then rescale to [0, 1]
    image_volume = sitk.Normalize(image_volume)
    image_volume = sitk.RescaleIntensity(
        image_volume, outputMinimum=0, outputMaximum=1
    )
    # cast dtype
    image_volume = sitk.Cast(image_volume, sitk.sitkFloat32)
    label_volume = sitk.Cast(label_volume, sitk.sitkUInt16)
    # save processed image/mask
    image_out_path = out_dir / (uid + "_img_preprocessed.nii.gz")
    label_out_path = out_dir / (uid + "_mask_preprocessed.nii.gz")
    sitk.WriteImage(
        image=image_volume, fileName=str(image_out_path), useCompression=True
    )
    sitk.WriteImage(
        image=label_volume, fileName=str(label_out_path), useCompression=True
    )
    # return array and switch axes
    image = np.transpose(sitk.GetArrayFromImage(image_volume), axes=[2, 1, 0])
    label = np.transpose(sitk.GetArrayFromImage(label_volume), axes=[2, 1, 0])
    return image, label
def save_segmentation_prediction(
    preds: np.ndarray,
    uids: List,
    out_dir: Path,
    tfds_dir: Path,
) -> None:
    """Save segmentation predictions as Nifti files.

    Each prediction is written as ``{uid}_mask_pred.nii.gz`` with the
    spacing/origin/direction metadata copied from the corresponding
    preprocessed label.

    Args:
        preds: (num_samples, ...), the values are integers.
        uids: (num_samples,).
        out_dir: output directory.
        tfds_dir: directory saving preprocessed images and labels.
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    for i, uid in enumerate(uids):
        # transform np array to volume
        # transpose back to the (depth, height, width) order sitk expects
        mask_pred = np.transpose(preds[i, ...], axes=[2, 1, 0]).astype(
            dtype="uint16"
        )
        volume_mask_pred = sitk.GetImageFromArray(mask_pred)
        # copy meta data from the preprocessed ground-truth label
        # str() for consistency with the rest of the file and compatibility
        # with SimpleITK versions whose readers require str paths
        volume_mask_true = sitk.ReadImage(
            str(tfds_dir / f"{uid}_mask_preprocessed.nii.gz")
        )
        volume_mask_pred.CopyInformation(volume_mask_true)
        # output
        sitk.WriteImage(
            image=volume_mask_pred,
            fileName=str(out_dir / f"{uid}_mask_pred.nii.gz"),
            useCompression=True,
        )
| 9,837 | 32.80756 | 82 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/amos_ct/__init__.py | """AMOS dataset.
https://arxiv.org/abs/2206.08023
https://zenodo.org/record/7155725#.Y4ndMuzP2rN
"""
| 103 | 13.857143 | 46 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/amos_ct/amos_ct_dataset_builder.py | """AMOS CT image dataset."""
import json
from pathlib import Path
from typing import Dict, Generator, List, Tuple
import numpy as np
import tensorflow_datasets as tfds
from imgx import IMAGE, LABEL, TEST_SPLIT, TRAIN_SPLIT, UID, VALID_SPLIT
from imgx.datasets.preprocess import load_and_preprocess_image_and_label
_DESCRIPTION = """
The data set includes 500 CT images from acquired from Amos: A large-scale abdominal multi-organ benchmark for versatile medical image segmentation.
""" # noqa: E501
_CITATION = """
@article{ji2022amos,
title={Amos: A large-scale abdominal multi-organ benchmark for versatile medical image segmentation},
author={Ji, Yuanfeng and Bai, Haotian and Yang, Jie and Ge, Chongjian and Zhu, Ye and Zhang, Ruimao and Li, Zhen and Zhang, Lingyan and Ma, Wanling and Wan, Xiang and others},
journal={arXiv preprint arXiv:2206.08023},
year={2022}
}
""" # noqa: E501
# voxel spacing (mm) used at preprocessing, (dx, dy, dz)
AMOS_CT_IMAGE_SPACING: Tuple[float, float, float] = (1.5, 1.5, 5.0)
# image shape after pad/crop, (width, height, depth)
AMOS_CT_IMAGE_SHAPE: Tuple[int, int, int] = (192, 128, 128)
# foreground class names; class index = list position + 1
AMOS_CLASS_NAMES = [
    "spleen",  # class 1
    "right kidney",
    "left kidney",
    "gall bladder",
    "esophagus",
    "liver",
    "stomach",
    "arota",  # [sic] spelling kept as in the official dataset metadata
    "postcava",
    "pancreas",
    "right adrenal gland",
    "left adrenal gland",
    "duodenum",
    "bladder",
    "prostate/uterus",
]
AMOS_NUM_CLASSES = len(AMOS_CLASS_NAMES) + 1  # include background
# folder name produced by tfds for the downloaded zenodo archive
AMOS_TFDS_FOLD = "ZIP.zenodo.org_record_7155725_files_amos22uIHT-rS-kf9k08JEainUcaKYppRMKikiGkm48PK52p0.zip"  # noqa: E501, pylint: disable=line-too-long
# fraction of the official validation split kept as validation;
# the remainder becomes the test split (see _split_generators)
AMOS_VALID_RATIO = 0.1
def keep_ct_data(path_pairs: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """Keep CT data only.

    Scans with an ID number of 500 or below are CT,
    larger IDs are MRI.

    Args:
        path_pairs: pairs of image and label relative paths.
            [{'image': './imagesTr/amos_0001.nii.gz',
              'label': './labelsTr/amos_0001.nii.gz'},
             {'image': './imagesTr/amos_0004.nii.gz',
              'label': './labelsTr/amos_0004.nii.gz'}]

    Returns:
        Pairs whose image ID is <= 500, in the original order.
    """
    filtered_pairs = []
    for sample in path_pairs:
        # file names look like "amos_0001.nii.gz";
        # the ID is the integer after the underscore
        uid = int(Path(sample["image"]).name.split(".")[0].split("_")[1])
        if uid <= 500:
            filtered_pairs.append(sample)
    return filtered_pairs
class Builder(
    tfds.core.GeneratorBasedBuilder, skip_registration=True
):  # type: ignore[call-arg]
    """DatasetBuilder for the amos_ct dataset.

    Skip registration due to an error in test, saying already registered.
    https://github.com/tensorflow/datasets/issues/552

    There are 15 foreground classes (see AMOS_CLASS_NAMES), "spleen",
    "right kidney", ..., "prostate/uterus", corresponding to label values
    1, ..., 15, with 0 as background.
    """

    VERSION = tfds.core.Version("1.0.0")
    RELEASE_NOTES = {
        "1.0.0": "CT image only.",
    }

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict(
                {
                    UID: tfds.features.Text(),
                    # image intensities are normalised floats in [0, 1]
                    IMAGE: tfds.features.Tensor(
                        shape=AMOS_CT_IMAGE_SHAPE, dtype=np.float32
                    ),
                    # label voxels store integer class ids
                    LABEL: tfds.features.Tensor(
                        shape=AMOS_CT_IMAGE_SHAPE, dtype=np.uint16
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://zenodo.org/record/7155725#.ZAN1mOzP2rM",
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: tfds.download.DownloadManager
    ) -> Dict[str, Generator[Tuple[str, Dict[str, np.ndarray]], None, None]]:
        """Returns dict of generators.

        Args:
            dl_manager: for downloading files.

        Returns:
            dict mapping split to generators.
        """
        # Download data from zenodo
        zip_dir = dl_manager.download_and_extract(
            "https://zenodo.org/record/7155725/files/amos22.zip"
        )
        data_dir = zip_dir / "amos22"
        preprocessed_dir = zip_dir / "preprocessed"
        preprocessed_dir.mkdir(parents=True, exist_ok=True)
        # Read metadata
        with open(data_dir / "dataset.json", encoding="utf-8") as f:
            metadata = json.load(f)
        # keep ct data only
        train_pairs = keep_ct_data(metadata["training"])
        valid_pairs = keep_ct_data(metadata["validation"])
        # split the official validation into valid and test:
        # the first AMOS_VALID_RATIO fraction stays valid, the rest is test
        num_valid = int(len(valid_pairs) * AMOS_VALID_RATIO)
        test_pairs = valid_pairs[num_valid:]
        valid_pairs = valid_pairs[:num_valid]
        # Returns the Dict[split names, Iterator[Key, Example]]
        return {
            TRAIN_SPLIT: self._generate_examples(
                image_label_pairs=train_pairs,
                data_dir=data_dir,
                preprocessed_dir=preprocessed_dir,
            ),
            VALID_SPLIT: self._generate_examples(
                image_label_pairs=valid_pairs,
                data_dir=data_dir,
                preprocessed_dir=preprocessed_dir,
            ),
            TEST_SPLIT: self._generate_examples(
                image_label_pairs=test_pairs,
                data_dir=data_dir,
                preprocessed_dir=preprocessed_dir,
            ),
        }

    def _generate_examples(
        self,
        image_label_pairs: List[Dict[str, str]],
        data_dir: Path,
        preprocessed_dir: Path,
    ) -> Generator[Tuple[str, Dict[str, np.ndarray]], None, None]:
        """Yields examples.

        Data Prepossessing Following nnUNet, for the CT data,
        we clip the HU values of each scans to the [-991, 362] range.
        https://arxiv.org/abs/2206.08023 Section C.1

        Args:
            image_label_pairs: pairs of image and label relative paths.
                [{'image': './imagesTr/amos_0001.nii.gz',
                  'label': './labelsTr/amos_0001.nii.gz'},
                 {'image': './imagesTr/amos_0004.nii.gz',
                  'label': './labelsTr/amos_0004.nii.gz'}]
            data_dir: directory saving images/masks.
            preprocessed_dir: directory to save processed images/masks.

        Yields:
            - image_key
            - dict having image and label numpy arrays.
        """
        for sample in image_label_pairs:
            image_path = sample["image"]
            label_path = sample["label"]
            # e.g. "amos_0001.nii.gz" -> "0001"
            uid = Path(image_path).name.split(".")[0].split("_")[1]
            image, label = load_and_preprocess_image_and_label(
                uid=uid,
                image_path=data_dir / image_path,
                label_path=data_dir / label_path,
                out_dir=preprocessed_dir,
                target_spacing=AMOS_CT_IMAGE_SPACING,
                target_shape=AMOS_CT_IMAGE_SHAPE,
                intensity_range=(-991.0, 362.0),
            )
            yield uid, {
                UID: uid,
                IMAGE: image,
                LABEL: label,
            }
| 7,208 | 33.004717 | 177 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/male_pelvic_mr/male_pelvic_mr_dataset_builder.py | """male_pelvic_mr dataset."""
from pathlib import Path
from typing import Dict, Generator, List, Tuple
import numpy as np
import pandas as pd
import tensorflow_datasets as tfds
from imgx import IMAGE, LABEL, TEST_SPLIT, TRAIN_SPLIT, UID, VALID_SPLIT
from imgx.datasets.preprocess import load_and_preprocess_image_and_label
_DESCRIPTION = """
The data set includes 589 T2-weighted images acquired from the same number of
patients collected by seven studies,
INDEX (Dickinson et al., 2013),
SmartTarget Biopsy Trial (Hamid et al., 2019),
PICTURE (Simmons et al., 2014),
TCIA Prostate3T (Litjens et al., 2015),
Promise12 (Litjens et al., 2014),
TCIA ProstateDx (Diagnosis) (Bloch et al., 2015), and
the Prostate MR Image Database (Choyke et al., 2016).
"""
_CITATION = """
@article{li2022prototypical,
title={Prototypical few-shot segmentation for cross-institution male pelvic structures with spatial registration},
author={Li, Yiwen and Fu, Yunguan and Gayo, Iani and Yang, Qianye and Min, Zhe and Saeed, Shaheer and Yan, Wen and Wang, Yipei and Noble, J Alison and Emberton, Mark and others},
journal={arXiv preprint arXiv:2209.05160},
year={2022}
}
""" # noqa: E501
# voxel spacing (mm) used at preprocessing, (dx, dy, dz)
PELVIC_IMAGE_SPACING: Tuple[float, float, float] = (0.75, 0.75, 2.5)
# image shape after pad/crop, (width, height, depth)
PELVIC_IMAGE_SHAPE: Tuple[int, int, int] = (256, 256, 48)
# per-institution fraction used for train+valid (see _split_generators)
PELVIC_TRAIN_RATIO = 0.75
# foreground class names; class index = list position + 1
PELVIC_CLASS_NAMES = [
    "BladderMask",
    "BoneMask",
    "ObdInternMask",
    "TZ",
    "CG",
    "RectumMask",
    "SV",
    "NVB",
]
PELVIC_NUM_CLASSES = len(PELVIC_CLASS_NAMES) + 1  # include background
# contributing institutions / source studies (keys in institution.txt)
PELVIC_INSTITUTIONS = [
    "UCL",
    "Prostate3T",
    "ProstateDx",
    "ProstateMRI",
    "bergen",
    "Nijmegen",
    "Rutgers",
]
# folder name produced by tfds for the downloaded zenodo archive
PELVIC_TFDS_FOLD = "ZIP.zenodo.org_record_7013610_files_dataW0mCI6aH_V-TdeDbM4TdKelNcJ5ZxbAi5isebqCnMr0.zip"  # noqa: E501, pylint: disable=line-too-long
class Builder(
    tfds.core.GeneratorBasedBuilder, skip_registration=True
):  # type: ignore[call-arg]
    """DatasetBuilder for male_pelvic_mr dataset.

    Skip registration due to an error in test, saying already registered.
    https://github.com/tensorflow/datasets/issues/552

    There are eight classes:
    "BladderMask", "BoneMask", "ObdInternMask", "TZ",
    "CG", "RectumMask", "SV", "NVB",
    corresponding to 1, 2, ..., 8.
    """

    VERSION = tfds.core.Version("1.0.0")
    RELEASE_NOTES = {
        "1.0.0": "Initial release.",
    }

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict(
                {
                    UID: tfds.features.Text(),
                    # image intensities are normalised floats in [0, 1]
                    IMAGE: tfds.features.Tensor(
                        shape=PELVIC_IMAGE_SHAPE, dtype=np.float32
                    ),
                    # label voxels store integer class ids
                    LABEL: tfds.features.Tensor(
                        shape=PELVIC_IMAGE_SHAPE, dtype=np.uint16
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://zenodo.org/record/7013610#.Y1U95-zMKrM",
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: tfds.download.DownloadManager
    ) -> Dict[str, Generator[Tuple[str, Dict[str, np.ndarray]], None, None]]:
        """Returns dict of generators.

        Args:
            dl_manager: for downloading files.

        Returns:
            dict mapping split to generators.
        """
        # Download data from zenodo
        zip_dir = dl_manager.download_and_extract(
            "https://zenodo.org/record/7013610/files/data.zip"
        )
        data_dir = zip_dir / "data"
        preprocessed_dir = zip_dir / "preprocessed"
        preprocessed_dir.mkdir(parents=True, exist_ok=True)
        institution_path = dl_manager.download(
            "https://zenodo.org/record/7013610/files/institution.txt"
        )
        # Organize metadata
        # institution.txt rows are "key institution" pairs
        df = pd.read_csv(
            institution_path,
            sep=" ",
            header=None,
            names=["key", "institution"],
            dtype={"key": object, "institution": object},
        )
        ins_to_uids = df.groupby("institution")["key"].agg(list).to_dict()
        # train/valid/test split
        # for each institution
        # - train+valid:test = 3:1
        # - valid has two images
        train_uids = []
        valid_uids = []
        test_uids = []
        for uids in ins_to_uids.values():
            num_examples = len(uids)
            num_examples_valid = 2
            num_examples_train = (
                int(num_examples * PELVIC_TRAIN_RATIO) - num_examples_valid
            )
            # NOTE(review): uids[2:num_examples_train] contains
            # num_examples_train - 2 items, so train+valid per institution is
            # int(n * 0.75) - 2 rather than int(n * 0.75); two extra images
            # per institution end up in test — confirm this split is intended.
            train_uids += uids[2:num_examples_train]
            valid_uids += uids[:2]
            test_uids += uids[num_examples_train:]
        # Returns the Dict[split names, Iterator[uid, example]]
        return {
            TRAIN_SPLIT: self._generate_examples(
                uids=train_uids,
                data_dir=data_dir,
                preprocessed_dir=preprocessed_dir,
            ),
            VALID_SPLIT: self._generate_examples(
                uids=valid_uids,
                data_dir=data_dir,
                preprocessed_dir=preprocessed_dir,
            ),
            TEST_SPLIT: self._generate_examples(
                uids=test_uids,
                data_dir=data_dir,
                preprocessed_dir=preprocessed_dir,
            ),
        }

    def _generate_examples(
        self, uids: List[str], data_dir: Path, preprocessed_dir: Path
    ) -> Generator[Tuple[str, Dict[str, np.ndarray]], None, None]:
        """Yields examples.

        Args:
            uids: unique ids for images.
            data_dir: directory saving images/masks.
            preprocessed_dir: directory to save processed images/masks.

        Yields:
            - image_key
            - dict having image and label numpy arrays.
        """
        for uid in uids:
            image, label = load_and_preprocess_image_and_label(
                uid=uid,
                image_path=data_dir / f"{uid}_img.nii",
                label_path=data_dir / f"{uid}_mask.nii",
                out_dir=preprocessed_dir,
                target_spacing=PELVIC_IMAGE_SPACING,
                target_shape=PELVIC_IMAGE_SHAPE,
                intensity_range=None,
            )
            yield uid, {
                UID: uid,
                IMAGE: image,
                LABEL: label,
            }
| 6,514 | 31.738693 | 180 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/datasets/male_pelvic_mr/__init__.py | """Male pelvic MR dataset.
https://arxiv.org/abs/2209.05160
"""
| 65 | 12.2 | 32 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/diffusion/variance_schedule.py | """Variance schedule for diffusion models."""
from __future__ import annotations
import enum
import numpy as np
from jax import numpy as jnp
class DiffusionBetaSchedule(enum.Enum):
    """Class to define beta schedule."""

    LINEAR = enum.auto()  # linear interpolation between beta_start and beta_end
    QUADRADIC = enum.auto()  # linear in sqrt(beta)
    COSINE = enum.auto()  # cosine-shaped, cf. https://arxiv.org/abs/2102.09672
    WARMUP10 = enum.auto()  # quadratic warmup over the first 10% of steps
    WARMUP50 = enum.auto()  # quadratic warmup over the first 50% of steps


def _get_warmup_beta_schedule(
    num_timesteps: int,
    beta_start: float,
    beta_end: float,
    warmup_divisor: int,
) -> jnp.ndarray:
    """Quadratic warmup to beta_end over the first 1/warmup_divisor of steps.

    Shared implementation of WARMUP10 (divisor 10) and WARMUP50 (divisor 2),
    which previously duplicated this code.

    Args:
        num_timesteps: number of time steps in total, T.
        beta_start: beta at t=0.
        beta_end: beta reached at the end of warmup and held afterwards.
        warmup_divisor: warmup covers num_timesteps // warmup_divisor steps.

    Returns:
        Shape (num_timesteps,) array of beta values.
    """
    # at least one warmup step so the schedule starts at beta_start
    num_timesteps_warmup = max(num_timesteps // warmup_divisor, 1)
    betas_warmup = (
        jnp.linspace(
            beta_start**0.5,
            beta_end**0.5,
            num_timesteps_warmup,
        )
        ** 2
    )
    return jnp.concatenate(
        [
            betas_warmup,
            jnp.ones((num_timesteps - num_timesteps_warmup,)) * beta_end,
        ]
    )


def get_beta_schedule(
    num_timesteps: int,
    beta_schedule: DiffusionBetaSchedule,
    beta_start: float,
    beta_end: float,
) -> jnp.ndarray:
    """Get variance (beta) schedule for q(x_t | x_{t-1}).

    Args:
        num_timesteps: number of time steps in total, T.
        beta_schedule: schedule for beta.
        beta_start: beta for t=0.
        beta_end: beta for t=T-1.

    Returns:
        Shape (num_timesteps,) array of beta values, for t=0, ..., T-1.
        Values are in ascending order.

    Raises:
        ValueError: for unknown schedule.
    """
    if beta_schedule == DiffusionBetaSchedule.LINEAR:
        return jnp.linspace(
            beta_start,
            beta_end,
            num_timesteps,
        )
    if beta_schedule == DiffusionBetaSchedule.QUADRADIC:
        return (
            jnp.linspace(
                beta_start**0.5,
                beta_end**0.5,
                num_timesteps,
            )
            ** 2
        )
    if beta_schedule == DiffusionBetaSchedule.COSINE:

        def f(t: float) -> float:
            """Eq 17 in https://arxiv.org/abs/2102.09672.

            Args:
                t: time step with values in [0, 1].

            Returns:
                Cumulative product of alpha.
            """
            return np.cos((t + 0.008) / 1.008 * np.pi / 2) ** 2

        # NOTE(review): alphas_cumprod_prev is never updated, so the betas
        # below are 1 - f(t) rather than Eq 17's ratio of cumulative
        # products; behaviour kept as-is — confirm intended.
        betas = [0.0]
        alphas_cumprod_prev = 1.0
        for i in range(1, num_timesteps):
            t = i / (num_timesteps - 1)
            alphas_cumprod = f(t)
            beta = 1 - alphas_cumprod / alphas_cumprod_prev
            betas.append(beta)
        # rescale into [beta_start, beta_end]
        return jnp.array(betas) * (beta_end - beta_start) + beta_start
    if beta_schedule == DiffusionBetaSchedule.WARMUP10:
        return _get_warmup_beta_schedule(
            num_timesteps=num_timesteps,
            beta_start=beta_start,
            beta_end=beta_end,
            warmup_divisor=10,
        )
    if beta_schedule == DiffusionBetaSchedule.WARMUP50:
        return _get_warmup_beta_schedule(
            num_timesteps=num_timesteps,
            beta_start=beta_start,
            beta_end=beta_end,
            warmup_divisor=2,
        )
    raise ValueError(f"Unknown beta_schedule {beta_schedule}.")
def downsample_beta_schedule(
    betas: jnp.ndarray,
    num_timesteps: int,
    num_timesteps_to_keep: int,
) -> jnp.ndarray:
    """Downsample a beta schedule, preserving cumulative alphas.

    The returned schedule is chosen so that the cumulative product of
    (1 - beta) at the kept steps matches the original schedule.

    Args:
        betas: beta schedule, shape (num_timesteps,).
            Values are in ascending order.
        num_timesteps: number of time steps in total, T.
        num_timesteps_to_keep: number of time steps to keep.

    Returns:
        Downsampled beta schedule, shape (num_timesteps_to_keep,).

    Raises:
        ValueError: on inconsistent shapes or step counts.
    """
    if betas.shape != (num_timesteps,):
        raise ValueError(
            f"betas.shape ({betas.shape}) must be equal to "
            f"(num_timesteps,)=({num_timesteps},)"
        )
    if (num_timesteps - 1) % (num_timesteps_to_keep - 1) != 0:
        raise ValueError(
            f"num_timesteps-1={num_timesteps-1} can't be evenly divided by "
            f"num_timesteps_to_keep-1={num_timesteps_to_keep-1}."
        )
    if num_timesteps_to_keep < 2:
        raise ValueError(
            f"num_timesteps_to_keep ({num_timesteps_to_keep}) must be >= 2."
        )
    if num_timesteps_to_keep == num_timesteps:
        # nothing to downsample
        return betas
    if num_timesteps_to_keep > num_timesteps:
        raise ValueError(
            f"num_timesteps_to_keep ({num_timesteps_to_keep}) "
            f"must be <= num_timesteps ({num_timesteps})"
        )
    # keep every stride-th cumulative alpha and re-derive betas from them
    stride = (num_timesteps - 1) // (num_timesteps_to_keep - 1)
    first_beta = betas[0]
    # (num_timesteps_to_keep,) cumulative products at the kept steps
    kept_alphas_cumprod = jnp.cumprod(1.0 - betas)[::stride]
    # (num_timesteps_to_keep - 1,) betas between consecutive kept steps
    tail = 1.0 - kept_alphas_cumprod[1:] / kept_alphas_cumprod[:-1]
    return jnp.append(first_beta, tail)
| 4,844 | 29.093168 | 77 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/diffusion/gaussian_diffusion.py | """Gaussian diffusion related functions.
https://github.com/WuJunde/MedSegDiff/blob/master/guided_diffusion/gaussian_diffusion.py
https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/diffusion_utils_2.py
"""
import dataclasses
import enum
from typing import Callable, Iterator, Sequence, Tuple, Union
import haiku as hk
import jax.numpy as jnp
import jax.random
from imgx import EPS
from imgx.diffusion.variance_schedule import (
DiffusionBetaSchedule,
downsample_beta_schedule,
get_beta_schedule,
)
from imgx.metric.distribution import (
discretized_gaussian_log_likelihood,
normal_kl,
)
class DiffusionModelOutputType(enum.Enum):
    """Class to define model's output meaning.

    - X_START: model predicts x_0.
    - X_PREVIOUS: model predicts x_{t-1}.
    - EPSILON: model predicts noise epsilon.
    """

    X_START = enum.auto()  # network output interpreted as x_0
    X_PREVIOUS = enum.auto()  # network output interpreted as x_{t-1}
    EPSILON = enum.auto()  # network output interpreted as the added noise
class DiffusionModelVarianceType(enum.Enum):
    r"""Class to define p(x_{t-1} | x_t) variance.

    - FIXED_SMALL: a smaller variance,
      \tilde{beta}_t = (1-\bar{alpha}_{t-1})/(1-\bar{alpha}_{t})*beta_t.
    - FIXED_LARGE: a larger variance, beta_t.
    - LEARNED: model outputs an array with channel=2, for mean and variance.
    - LEARNED_RANGE: model outputs an array with channel=2, for mean and
      variance. But the variance is not raw values, it's a coefficient to
      control the value between FIXED_SMALL and FIXED_LARGE.
    """

    FIXED_SMALL = enum.auto()  # posterior variance \tilde{beta}_t
    FIXED_LARGE = enum.auto()  # forward-process variance beta_t
    LEARNED = enum.auto()  # second output channel is the raw (log-)variance
    LEARNED_RANGE = enum.auto()  # second channel interpolates small <-> large
class DiffusionSpace(enum.Enum):
    """Class to define the meaning of x.

    Model always outputs logits.
    """

    SCALED_PROBS = enum.auto()  # values will be [-1, 1]
    LOGITS = enum.auto()  # raw, unbounded logits
def extract_and_expand(
    arr: jnp.ndarray, t: jnp.ndarray, ndim: int
) -> jnp.ndarray:
    """Gather per-batch values from a 1D array and add singleton axes.

    Args:
        arr: 1D of shape (num_timesteps, ).
        t: storing index values < num_timesteps, shape (batch, ).
        ndim: number of dimensions for an array of shape (batch, ...).

    Returns:
        Expanded array of shape (batch, ...), expanded axes have dim 1.
    """
    # (batch,) values selected per example
    gathered = arr[t]
    # append singleton axes so the result broadcasts against (batch, ...)
    trailing_axes = tuple(range(1, ndim))
    return jnp.expand_dims(gathered, axis=trailing_axes)
@dataclasses.dataclass
class GaussianDiffusion(hk.Module):
"""Class for Gaussian diffusion sampling.
https://github.com/WuJunde/MedSegDiff/blob/master/guided_diffusion/gaussian_diffusion.py
TODO: split segmentation related functions to a sub-class.
"""
    def __init__(
        self,
        model: hk.Module,
        num_timesteps: int,  # T
        num_timesteps_beta: int,
        beta_schedule: DiffusionBetaSchedule,
        beta_start: float,
        beta_end: float,
        model_out_type: DiffusionModelOutputType,
        model_var_type: DiffusionModelVarianceType,
        x_space: DiffusionSpace,
        x_limit: float,
        use_ddim: bool,
        noise_fn: Callable = jax.random.normal,
    ) -> None:
        """Init.

        q(x_t | x_{t-1}) ~ Normal(sqrt(1-beta_t)*x_{t-1}, beta_t*I)

        Precomputes all per-timestep schedule constants (shape (T,)) used
        by the q(.)/p(.) helper methods.

        Args:
            model: haiku model.
            num_timesteps: number of diffusion steps.
            num_timesteps_beta: number of steps when defining beta schedule.
            beta_schedule: schedule for betas.
            beta_start: beta for t=0.
            beta_end: beta for t=T.
            model_out_type: type of model output.
            model_var_type: type of variance for p(x_{t-1} | x_t).
            x_space: x is logits or scaled_probs.
            x_limit: x_t has values in [-x_limit, x_limit], the range has to be
                symmetric, as for T, the distribution is centered at zero.
            use_ddim: use ddim_sample.
            noise_fn: a function that gets noise of the same shape as x_t.
        """
        super().__init__()
        self.model = model
        self.num_timesteps = num_timesteps
        self.num_timesteps_beta = num_timesteps_beta
        self.use_ddim = use_ddim
        self.model_out_type = model_out_type
        self.model_var_type = model_var_type
        self.x_space = x_space
        self.x_limit = x_limit
        self.noise_fn = noise_fn

        # shape are all (T,)
        # corresponding to 0, ..., T-1, where 0 means one step
        # the schedule is defined at num_timesteps_beta resolution, then
        # downsampled to num_timesteps while preserving cumulative alphas
        betas = get_beta_schedule(
            num_timesteps=num_timesteps_beta,
            beta_schedule=beta_schedule,
            beta_start=beta_start,
            beta_end=beta_end,
        )
        self.betas = downsample_beta_schedule(
            betas=betas,
            num_timesteps=num_timesteps_beta,
            num_timesteps_to_keep=num_timesteps,
        )
        alphas = 1.0 - self.betas  # alpha_t
        self.alphas_cumprod = jnp.cumprod(alphas)  # \bar{alpha}_t
        # \bar{alpha}_{t-1} and \bar{alpha}_{t+1}, padded at the ends
        self.alphas_cumprod_prev = jnp.append(1.0, self.alphas_cumprod[:-1])
        self.alphas_cumprod_next = jnp.append(self.alphas_cumprod[1:], 0.0)
        self.sqrt_alphas_cumprod = jnp.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = jnp.sqrt(1.0 - self.alphas_cumprod)
        self.log_one_minus_alphas_cumprod = jnp.log(1.0 - self.alphas_cumprod)
        # last value is inf as last value of alphas_cumprod is zero
        self.sqrt_recip_alphas_cumprod = jnp.sqrt(1.0 / self.alphas_cumprod)
        self.sqrt_recip_alphas_cumprod_minus_one = jnp.sqrt(
            1.0 / self.alphas_cumprod - 1
        )

        # q(x_{t-1} | x_t, x_0)
        # mean = coeff_start * x_0 + coeff_t * x_t
        # first values are nan
        self.posterior_mean_coeff_start = (
            self.betas
            * jnp.sqrt(self.alphas_cumprod_prev)
            / (1.0 - self.alphas_cumprod)
        )
        self.posterior_mean_coeff_t = (
            jnp.sqrt(alphas)
            * (1.0 - self.alphas_cumprod_prev)
            / (1.0 - self.alphas_cumprod)
        )

        # variance
        # log calculation clipped because the posterior variance is 0 at t=0
        # alphas_cumprod_prev has 1.0 appended in front
        self.posterior_variance = (
            self.betas
            * (1.0 - self.alphas_cumprod_prev)
            / (1.0 - self.alphas_cumprod)
        )
        # posterior_variance first value is zero
        self.posterior_log_variance_clipped = jnp.log(
            jnp.append(self.posterior_variance[1], self.posterior_variance[1:])
        )
def q_mean_log_variance(
    self, x_start: jnp.ndarray, t: jnp.ndarray
) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Return mean and log-variance of the forward marginal q(x_t | x_0).

    Args:
        x_start: noiseless input, shape (batch, ...).
        t: per-sample time indices < self.num_timesteps, shape (batch, ).

    Returns:
        mean: shape (batch, ...), expanded axes have dim 1.
        log_variance: shape (batch, ...), expanded axes have dim 1.
    """
    coeff_start = extract_and_expand(
        self.sqrt_alphas_cumprod, t, x_start.ndim
    )
    log_variance = extract_and_expand(
        self.log_one_minus_alphas_cumprod, t, x_start.ndim
    )
    return coeff_start * x_start, log_variance
def q_sample(
    self,
    x_start: jnp.ndarray,
    noise: jnp.ndarray,
    t: jnp.ndarray,
) -> jnp.ndarray:
    """Sample from q(x_t | x_0) via reparameterization.

    x_t = sqrt(alphas_cumprod) * x_0 + sqrt(1 - alphas_cumprod) * noise.

    Args:
        x_start: noiseless input, shape (batch, ...).
        noise: same shape as x_start.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        Noisy array with same shape as x_start, clipped to valid range.
    """
    coeff_start = extract_and_expand(
        self.sqrt_alphas_cumprod, t, x_start.ndim
    )
    # NOTE: this is the standard deviation, not the variance
    coeff_noise = extract_and_expand(
        self.sqrt_one_minus_alphas_cumprod, t, x_start.ndim
    )
    return self.clip_x(coeff_start * x_start + coeff_noise * noise)
def q_posterior_mean(
    self, x_start: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
) -> jnp.ndarray:
    """Mean of the posterior q(x_{t-1} | x_t, x_0).

    mean = coeff_start * x_0 + coeff_t * x_t.

    Args:
        x_start: noiseless input, shape (batch, ...).
        x_t: noisy input, same shape as x_start.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        mean: same shape as x_start.
    """
    coeff_start = extract_and_expand(
        self.posterior_mean_coeff_start, t, x_start.ndim
    )
    coeff_t = extract_and_expand(
        self.posterior_mean_coeff_t, t, x_start.ndim
    )
    return coeff_start * x_start + coeff_t * x_t
def q_posterior_mean_variance(
    self, x_start: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Mean and log-variance of the posterior q(x_{t-1} | x_t, x_0).

    Args:
        x_start: noiseless input, shape (batch, ...).
        x_t: noisy input, same shape as x_start.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        mean: same shape as x_start.
        log_variance: shape (batch, ...), expanded axes have dim 1.
    """
    posterior_log_variance = extract_and_expand(
        self.posterior_log_variance_clipped, t, x_start.ndim
    )
    posterior_mean = self.q_posterior_mean(x_start, x_t, t)
    return posterior_mean, posterior_log_variance
def p_mean_variance(  # pylint:disable=R0912
    self,
    model_out: jnp.ndarray,
    x_t: jnp.ndarray,
    t: jnp.ndarray,
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
    """Get the distribution p(x_{t-1} | x_t).

    The variance follows self.model_var_type (fixed or learned) and the
    mean follows self.model_out_type, which declares what the network
    output represents (x_0, x_{t-1} or the noise epsilon).

    Args:
        model_out: model predicted output.
            If model estimates variance, shape (batch, ..., 2*num_classes),
            else shape (batch, ..., num_classes).
        x_t: noisy input, shape (batch, ..., num_classes).
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        x_start: predicted, same shape as x_t, values are clipped.
        mean: same shape as x_t.
        log_variance: compatible shape to (batch, ..., num_classes).

    Raises:
        ValueError: if model_var_type or model_out_type is unknown.
    """
    # variance
    if self.model_var_type == DiffusionModelVarianceType.FIXED_SMALL:
        # lower bound: posterior variance of q(x_{t-1} | x_t, x_0)
        log_variance = self.posterior_log_variance_clipped
        # extend shape
        log_variance = extract_and_expand(log_variance, t, x_t.ndim)
    elif self.model_var_type == DiffusionModelVarianceType.FIXED_LARGE:
        # upper bound: beta_t, with the first beta replaced by the
        # posterior variance at t=1 — presumably to avoid the degenerate
        # value at t=0; TODO confirm against reference DDPM code.
        variance = jnp.append(self.posterior_variance[1], self.betas[1:])
        log_variance = jnp.log(variance)
        # extend shape
        log_variance = extract_and_expand(log_variance, t, x_t.ndim)
    elif self.model_var_type == DiffusionModelVarianceType.LEARNED:
        # second half of the channels is the predicted log-variance
        # model_out (batch, ..., num_classes*2)
        model_out, log_variance = jnp.split(
            model_out, indices_or_sections=2, axis=-1
        )
    elif self.model_var_type == DiffusionModelVarianceType.LEARNED_RANGE:
        # second half of the channels is an interpolation coefficient
        # between the posterior variance (min) and beta_t (max), in log space
        # model_out (batch, ..., num_classes*2)
        model_out, var_coeff = jnp.split(
            model_out, indices_or_sections=2, axis=-1
        )
        log_min_variance = self.posterior_log_variance_clipped
        log_max_variance = jnp.log(self.betas)
        log_min_variance = extract_and_expand(log_min_variance, t, x_t.ndim)
        log_max_variance = extract_and_expand(log_max_variance, t, x_t.ndim)
        # var_coeff values are in [-1, 1] for [min_var, max_var].
        var_coeff = jnp.clip(var_coeff, -1.0, 1.0)
        # map [-1, 1] -> [0, 1]
        var_coeff = (var_coeff + 1) / 2
        log_variance = (
            var_coeff * log_max_variance
            + (1 - var_coeff) * log_min_variance
        )
    else:
        raise ValueError(
            f"Unknown DiffusionModelVarianceType {self.model_var_type}."
        )
    # mean
    if self.model_out_type == DiffusionModelOutputType.X_START:
        # model predicts x_0; mean comes from q(x_{t-1} | x_t, x_0)
        x_start = self.logits_to_x(model_out)
        x_start = self.clip_x(x_start)
        mean = self.q_posterior_mean(x_start=x_start, x_t=x_t, t=t)
    elif self.model_out_type == DiffusionModelOutputType.X_PREVIOUS:
        # model predicts x_{t-1} directly; derive x_0 from it
        x_prev = self.logits_to_x(model_out)
        x_prev = self.clip_x(x_prev)
        mean = x_prev
        x_start = self.predict_xstart_from_xprev_xt(
            x_prev=x_prev, x_t=x_t, t=t
        )
        x_start = self.clip_x(x_start)
    elif self.model_out_type == DiffusionModelOutputType.EPSILON:
        # model predicts the noise; recover x_0 then the posterior mean
        x_start = self.predict_xstart_from_epsilon_xt(
            x_t=x_t, epsilon=model_out, t=t
        )
        x_start = self.clip_x(x_start)
        mean = self.q_posterior_mean(x_start=x_start, x_t=x_t, t=t)
    else:
        raise ValueError(
            f"Unknown DiffusionModelOutputType {self.model_out_type}."
        )
    return x_start, mean, log_variance
def p_sample(
    self,
    model_out: jnp.ndarray,
    x_t: jnp.ndarray,
    t: jnp.ndarray,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Sample x_{t-1} ~ p(x_{t-1} | x_t) (ancestral / DDPM sampling).

    Args:
        model_out: model predicted output.
            If model estimates variance, shape (batch, ..., 2*num_classes),
            else shape (batch, ..., num_classes).
        x_t: noisy input, shape (batch, ..., num_classes).
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        sample: x_{t-1}, same shape as x_t.
        x_start_pred: same shape as x_t.
    """
    x_start_pred, mean, log_variance = self.p_mean_variance(
        model_out=model_out,
        x_t=x_t,
        t=t,
    )
    noise = self.noise_sample(shape=x_t.shape, dtype=x_t.dtype)
    # no noise when t=0: the final step returns the mean deterministically
    # mean + exp(log(sigma**2)/2) * noise = mean + sigma * noise
    nonzero_mask = jnp.expand_dims(
        jnp.array(t != 0, dtype=noise.dtype),
        axis=tuple(range(1, noise.ndim)),
    )
    sample = mean + nonzero_mask * jnp.exp(0.5 * log_variance) * noise
    # clip as the value may be out of range
    sample = self.clip_x(sample)
    return sample, x_start_pred
def ddim_sample(
    self,
    model_out: jnp.ndarray,
    x_t: jnp.ndarray,
    t: jnp.ndarray,
    eta: float = 0.0,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Sample x_{t-1} ~ p(x_{t-1} | x_t) using a DDIM-style update.

    Unlike p_sample, the update is re-derived from the predicted x_0 and
    epsilon instead of the posterior mean/variance; with eta=0 the step
    is deterministic.

    Args:
        model_out: model predicted output.
            If model estimates variance, shape (batch, ..., 2*num_classes),
            else shape (batch, ..., num_classes).
        x_t: noisy input, shape (batch, ..., num_classes).
        t: storing index values < self.num_timesteps, shape (batch, ).
        eta: control the noise level in sampling; 0 means deterministic.

    Returns:
        sample: x_{t-1}, same shape as x_t.
        x_start_pred: same shape as x_t.
    """
    # only x_start is needed; variance is rebuilt from eta below
    x_start_pred, _, _ = self.p_mean_variance(
        model_out=model_out,
        x_t=x_t,
        t=t,
    )
    noise = self.noise_sample(shape=x_t.shape, dtype=x_t.dtype)
    epsilon = self.predict_epsilon_from_xstart_xt(
        x_t=x_t, x_start=x_start_pred, t=t
    )
    alphas_cumprod_prev = extract_and_expand(
        self.alphas_cumprod_prev, t, x_t.ndim
    )
    coeff_start = jnp.sqrt(alphas_cumprod_prev)
    # NOTE(review): this eta-scaled *log* posterior variance is used below
    # as if it were sigma_t, both squared in coeff_epsilon and as the noise
    # scale. Standard DDIM uses
    # sigma_t = eta * sqrt((1-abar_{t-1})/(1-abar_t)) * sqrt(1-abar_t/abar_{t-1});
    # confirm against the reference implementation.
    log_variance = (
        extract_and_expand(self.posterior_log_variance_clipped, t, x_t.ndim)
        * eta
    )
    coeff_epsilon = jnp.sqrt(1.0 - alphas_cumprod_prev - log_variance**2)
    mean = coeff_start * x_start_pred + coeff_epsilon * epsilon
    # no noise when t=0
    nonzero_mask = jnp.expand_dims(
        jnp.array(t != 0, dtype=noise.dtype),
        axis=tuple(range(1, noise.ndim)),
    )
    sample = mean + nonzero_mask * log_variance * noise
    # clip as the value may be out of range
    sample = self.clip_x(sample)
    return sample, x_start_pred
def sample_mask(
    self,
    image: jnp.ndarray,
    x_t: jnp.ndarray,
) -> jnp.ndarray:
    """Generate a segmentation mask conditioned on an image.

    Runs the full reverse process and returns the final x_start
    prediction. The noise is defined on the segmentation mask and x_t is
    treated as logits.

    Args:
        image: image to be segmented, shape = (batch, ..., C).
        x_t: segmentation logits to be refined,
            shape = (batch, ..., num_classes).

    Returns:
        Sampled segmentation logits, shape = (batch, ..., num_classes).
    """
    # keep only the prediction from the last (t=0) step
    for pred in self.sample_mask_progressive(image=image, x_t=x_t):
        final_pred = pred
    return final_pred
def sample_mask_progressive(
    self,
    image: jnp.ndarray,
    x_t: jnp.ndarray,
) -> Iterator[jnp.ndarray]:
    """Iteratively denoise a segmentation mask, yielding each x_start.

    The model is conditioned on the image by channel-concatenation; the
    noise is defined on the segmentation mask and x_t is treated as
    logits.

    Args:
        image: image to be segmented, shape = (batch, ..., C).
        x_t: segmentation logits to be refined,
            shape = (batch, ..., num_classes).

    Yields:
        x_start prediction per step, shape = (batch, ..., num_classes),
        for t = T-1 down to 0.
    """
    sample_fn = self.ddim_sample if self.use_ddim else self.p_sample
    batch_size = x_t.shape[0]
    for step in range(self.num_timesteps - 1, -1, -1):
        # same timestep for every batch element, (batch, )
        t_batch = jnp.full((batch_size,), step, dtype=jnp.int16)
        # condition on the image, (batch, ..., ch_input + num_classes)
        model_in = jnp.concatenate([image, x_t], axis=-1)
        # (batch, ..., num_classes) or (batch, ..., 2*num_classes)
        model_out = self.model(model_in, t_batch)
        x_t, x_start = sample_fn(
            model_out=model_out,
            x_t=x_t,
            t=t_batch,
        )
        yield x_start
def predict_xstart_from_xprev_xt(
    self, x_prev: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
) -> jnp.ndarray:
    """Recover x_0 from x_{t-1} and x_t.

    Since x_{t-1} = coeff_start * x_0 + coeff_t * x_t (the posterior
    mean), inverting gives
        x_0 = x_{t-1}/coeff_start - (coeff_t/coeff_start) * x_t.

    Args:
        x_prev: noisy input at t-1, shape (batch, ...).
        x_t: noisy input, same shape as x_prev.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        predicted x_0, same shape as x_prev.
    """
    inv_coeff_start = extract_and_expand(
        1.0 / self.posterior_mean_coeff_start, t, x_t.ndim
    )
    coeff_ratio = extract_and_expand(
        self.posterior_mean_coeff_t / self.posterior_mean_coeff_start,
        t,
        x_t.ndim,
    )
    return inv_coeff_start * x_prev - coeff_ratio * x_t
def predict_xprev_from_xstart_xt(
    self, x_start: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
) -> jnp.ndarray:
    """Compute the posterior mean x_{t-1} from x_0 and x_t.

    The mean of q(x_{t-1} | x_t, x_0) is
        x_{t-1} = coeff_start * x_0 + coeff_t * x_t.

    Args:
        x_start: predicted x_0, shape (batch, ...).
        x_t: noisy input, same shape as x_start.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        predicted x_{t-1}, same shape as x_start.
    """
    coeff_start = extract_and_expand(
        self.posterior_mean_coeff_start, t, x_t.ndim
    )
    coeff_t = extract_and_expand(self.posterior_mean_coeff_t, t, x_t.ndim)
    return coeff_start * x_start + coeff_t * x_t
def sample_xprev_from_xstart_xt(
    self, x_start: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
) -> jnp.ndarray:
    """Sample x_{t-1} from q(x_{t-1} | x_0, x_t).

    The posterior mean is coeff_start * x_0 + coeff_t * x_t and its
    log-variance is posterior_log_variance_clipped, so
        x_{t-1} = mean + exp(0.5 * log_variance) * noise
                = mean + sigma * noise.

    Args:
        x_start: predicted x_0, shape (batch, ...).
        x_t: noisy input, same shape as x_start.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        Sampled x_{t-1}, same shape as x_start, clipped to valid range.
    """
    x_prev = self.predict_xprev_from_xstart_xt(
        x_start=x_start,
        x_t=x_t,
        t=t,
    )
    noise = self.noise_sample(shape=x_t.shape, dtype=x_t.dtype)
    log_variance = extract_and_expand(
        self.posterior_log_variance_clipped, t, x_t.ndim
    )
    # BUG FIX: the noise was previously scaled by log_variance itself;
    # the standard deviation is exp(0.5 * log_variance), consistent with
    # p_sample's `mean + exp(log(sigma**2)/2) * noise`.
    sample = x_prev + noise * jnp.exp(0.5 * log_variance)
    return self.clip_x(sample)
def predict_xstart_from_epsilon_xt(
    self, x_t: jnp.ndarray, epsilon: jnp.ndarray, t: jnp.ndarray
) -> jnp.ndarray:
    """Recover x_0 from the noise epsilon and x_t.

    From x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * epsilon:
        x_0 = x_t / sqrt(abar_t) - sqrt(1/abar_t - 1) * epsilon.

    Args:
        x_t: noisy input at t, shape (batch, ...).
        epsilon: noise, shape (batch, ...), expanded axes have dim 1.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        predicted x_0, same shape as x_t.
    """
    recip = extract_and_expand(self.sqrt_recip_alphas_cumprod, t, x_t.ndim)
    recip_m1 = extract_and_expand(
        self.sqrt_recip_alphas_cumprod_minus_one, t, x_t.ndim
    )
    return recip * x_t - recip_m1 * epsilon
def predict_epsilon_from_xstart_xt(
    self, x_t: jnp.ndarray, x_start: jnp.ndarray, t: jnp.ndarray
) -> jnp.ndarray:
    """Recover the noise epsilon from x_0 and x_t.

    From x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * epsilon:
        epsilon = (x_t / sqrt(abar_t) - x_0) / sqrt(1/abar_t - 1).

    Args:
        x_t: noisy input at t, shape (batch, ...).
        x_start: predicted x_0, same shape as x_t.
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        predicted epsilon, same shape as x_t.
    """
    recip = extract_and_expand(self.sqrt_recip_alphas_cumprod, t, x_t.ndim)
    recip_m1 = extract_and_expand(
        self.sqrt_recip_alphas_cumprod_minus_one, t, x_t.ndim
    )
    return (recip * x_t - x_start) / recip_m1
def sample_timestep(
    self, batch_size: int, min_val: Union[int, jnp.ndarray] = 0
) -> jnp.ndarray:
    """Sample uniform random timesteps of shape (batch, ).

    Wrapped here so callers do not need to manage the random key.

    Args:
        batch_size: number of timesteps to sample.
        min_val: minimum value, inclusive; capped at T-1.

    Returns:
        Timesteps with values between min_val and T-1, both inclusive.
    """
    lower = jnp.minimum(min_val, self.num_timesteps - 1)
    return jax.random.randint(
        hk.next_rng_key(),
        shape=(batch_size,),
        minval=lower,  # inclusive
        maxval=self.num_timesteps,  # exclusive
    )
def noise_sample(
    self, shape: Sequence[int], dtype: jnp.dtype
) -> jnp.ndarray:
    """Draw noise of the given shape and dtype via self.noise_fn.

    Wrapped here so callers do not need to manage the random key.

    Args:
        shape: array shape.
        dtype: data type.

    Returns:
        Noise array of the requested shape and dtype.
    """
    key = hk.next_rng_key()
    return self.noise_fn(key=key, shape=shape, dtype=dtype)
def clip_x(self, x: jnp.ndarray) -> jnp.ndarray:
    """Clip x to [-x_limit, x_limit].

    A non-positive x_limit disables clipping.

    Args:
        x: any array.

    Returns:
        Clipped array (or x unchanged when clipping is disabled).
    """
    limit = self.x_limit
    if limit <= 0:
        return x
    return jnp.clip(x, -limit, limit)
def logits_to_x(self, logits: jnp.ndarray) -> jnp.ndarray:
    """Map unnormalised logits into the diffusion x space.

    Args:
        logits: unnormalised logits.

    Returns:
        Array in the same space as x_start.

    Raises:
        ValueError: if the configured x space is unknown.
    """
    if self.x_space == DiffusionSpace.LOGITS:
        return logits
    if self.x_space == DiffusionSpace.SCALED_PROBS:
        # softmax probabilities rescaled from [0, 1] to [-1, 1]
        probs = jax.nn.softmax(logits, axis=-1)
        return probs * 2.0 - 1.0
    raise ValueError(f"Unknown x space {self.x_space}.")
def x_to_logits(self, x: jnp.ndarray) -> jnp.ndarray:
    """Map x back to logits.

    Args:
        x: in the same space as x_start.

    Returns:
        Logits.

    Raises:
        ValueError: if the configured x space is unknown.
    """
    if self.x_space == DiffusionSpace.LOGITS:
        return x
    if self.x_space == DiffusionSpace.SCALED_PROBS:
        # invert the [0, 1] -> [-1, 1] rescaling; clip to avoid log(0)
        probs = jnp.clip((x + 1) / 2, EPS, 1.0)
        return jnp.log(probs)
    raise ValueError(f"Unknown x space {self.x_space}.")
def variational_lower_bound(
    self,
    model_out: jnp.ndarray,
    x_start: jnp.ndarray,
    x_t: jnp.ndarray,
    t: jnp.ndarray,
) -> jnp.ndarray:
    """Variational lower-bound term, smaller is better.

    For t > 0 this is KL(q(x_{t-1} | x_t, x_0) || p(x_{t-1} | x_t)); for
    t = 0 it is the decoder negative log-likelihood under the model
    distribution. The resulting units are bits (rather than nats, as one
    might expect), which allows for comparison to other papers.

    Args:
        model_out: model predicted output, may present different things,
            shape (batch, ...).
        x_start: cleaned, same shape as x_t.
        x_t: noisy input, shape (batch, ...).
        t: storing index values < self.num_timesteps, shape (batch, ).

    Returns:
        lower bounds of shape (batch, ).
    """
    reduce_axis = tuple(range(x_t.ndim))[1:]
    # q(x_{t-1} | x_t, x_0): the tractable posterior
    q_mean, q_log_variance = self.q_posterior_mean_variance(
        x_start=x_start, x_t=x_t, t=t
    )
    # p(x_{t-1} | x_t): the model distribution
    _, p_mean, p_log_variance = self.p_mean_variance(
        model_out=model_out,
        x_t=x_t,
        t=t,
    )
    kl = normal_kl(
        q_mean=q_mean,
        q_log_variance=q_log_variance,
        p_mean=p_mean,
        p_log_variance=p_log_variance,
    )
    # BUG FIX: the decoder NLL must be evaluated under the MODEL
    # distribution p(x_0 | x_1). It was previously computed with the
    # posterior q, whose mean at t=0 equals x_start itself, making the
    # t=0 term independent of the model and thus useless for training.
    nll = -discretized_gaussian_log_likelihood(
        x_start, mean=p_mean, log_variance=p_log_variance
    )
    # (batch, ); divide by log(2) to convert nats to bits
    kl = jnp.mean(kl, axis=reduce_axis) / jnp.log(2.0)
    nll = jnp.mean(nll, axis=reduce_axis) / jnp.log(2.0)
    # return neg-log-likelihood for t = 0
    return jnp.where(t == 0, nll, kl)
| 27,736 | 33.413151 | 92 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/diffusion/__init__.py | """Diffusion related functions."""
| 35 | 17 | 34 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/conf/__init__.py | """Package for config files."""
| 32 | 15.5 | 31 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/loss/cross_entropy.py | """Loss functions for classification."""
import jax
import jax.numpy as jnp
import optax
def mean_cross_entropy(
    logits: jnp.ndarray,
    mask_true: jnp.ndarray,
) -> jnp.ndarray:
    """Mean softmax cross entropy.

    Args:
        logits: unscaled prediction, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).

    Returns:
        Cross entropy loss value of shape (1, ).
    """
    # equivalent to optax.softmax_cross_entropy:
    # -sum(labels * log_softmax(logits), axis=-1)
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    per_example = -jnp.sum(mask_true * log_probs, axis=-1)
    return jnp.mean(per_example)
def mean_focal_loss(
    logits: jnp.ndarray,
    mask_true: jnp.ndarray,
    gamma: float = 2.0,
) -> jnp.ndarray:
    """Focal loss.

    https://arxiv.org/abs/1708.02002

    Args:
        logits: unscaled prediction, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).
        gamma: adjust class imbalance, 0 is equivalent to cross entropy.

    Returns:
        Focal loss value of shape (1, ).
    """
    # log of probabilities via log-softmax for numerical stability
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    probs = jnp.exp(log_probs)
    # down-weight well-classified examples by (1-p)^gamma
    weighted = -((1 - probs) ** gamma) * log_probs * mask_true
    # targets are one hot, so summing over the class axis selects
    # the true-class term, (batch, ..., num_classes) -> (batch, ...)
    per_example = jnp.sum(weighted, axis=-1)
    return jnp.mean(per_example)
| 1,352 | 26.06 | 72 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/loss/dice.py | """Loss functions for image segmentation."""
import jax
import jax.numpy as jnp
def mean_with_background(batch_cls_loss: jnp.ndarray) -> jnp.ndarray:
    """Average per-class losses over all classes, background included.

    NaN entries (classes without labels) are ignored.

    Args:
        batch_cls_loss: shape (batch, num_classes).

    Returns:
        Mean loss of shape (1,).
    """
    flattened = jnp.ravel(batch_cls_loss)
    return jnp.nanmean(flattened)
def mean_without_background(batch_cls_loss: jnp.ndarray) -> jnp.ndarray:
    """Average per-class losses, excluding the background class (index 0).

    NaN entries (classes without labels) are ignored.

    Args:
        batch_cls_loss: shape (batch, num_classes).

    Returns:
        Mean loss of shape (1,).
    """
    foreground = batch_cls_loss[:, 1:]
    return jnp.nanmean(foreground)
def dice_loss(
    logits: jnp.ndarray,
    mask_true: jnp.ndarray,
) -> jnp.ndarray:
    """Per-class soft Dice loss, smaller is better.

    Classes that have no positive label in mask_true get NaN instead of a
    loss value; this avoids the need for smoothing and potentially NaN
    gradients.

    Args:
        logits: unscaled prediction, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).

    Returns:
        Dice loss value of shape (batch, num_classes).
    """
    probs = jax.nn.softmax(logits)
    # reduce over spatial axes only, keeping batch and class axes
    spatial_axes = tuple(range(1, probs.ndim - 1))
    intersection = jnp.sum(probs * mask_true, axis=spatial_axes)
    total = jnp.sum(probs + mask_true, axis=spatial_axes)
    # (batch, num_classes); True where the class has at least one label
    has_label = jnp.sum(mask_true, axis=spatial_axes) > 0
    return jnp.where(has_label, 1.0 - 2.0 * intersection / total, jnp.nan)
def mean_dice_loss(
    logits: jnp.ndarray,
    mask_true: jnp.ndarray,
    include_background: bool,
) -> jnp.ndarray:
    """Mean soft Dice loss, smaller is better.

    Classes without labels are excluded from the average (their per-class
    loss is NaN and nanmean ignores it).

    Args:
        logits: unscaled prediction, (batch, ..., num_classes).
        mask_true: one hot targets, (batch, ..., num_classes).
        include_background: include background as a separate class.

    Returns:
        Dice loss value of shape (1, ).
    """
    per_class = dice_loss(logits=logits, mask_true=mask_true)
    # branch on include_background in a jit-compatible way
    return jax.lax.cond(
        include_background,
        lambda loss: jnp.nanmean(loss),
        lambda loss: jnp.nanmean(loss[:, 1:]),
        per_class,
    )
| 2,386 | 27.082353 | 75 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/loss/__init__.py | """Package for loss functions."""
from imgx.loss.cross_entropy import mean_cross_entropy, mean_focal_loss
from imgx.loss.dice import mean_dice_loss
__all__ = [
"mean_cross_entropy",
"mean_focal_loss",
"mean_dice_loss",
]
| 234 | 22.5 | 71 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/model/unet_3d_slice_time.py | """UNet for segmentation."""
import dataclasses
from typing import Callable, List, Tuple
import haiku as hk
import jax
from jax import numpy as jnp
from imgx.model.basic import instance_norm, sinusoidal_positional_embedding
from imgx.model.unet_3d_slice import Conv2dNormAct, Conv2dPool
@dataclasses.dataclass
class TimeConv2dResBlock(hk.Module):
    """Residual 2D conv block conditioned on a time embedding.

    Defined separately from Conv2dResBlock to support hk.remat, which
    does not allow conditional branching (if / else).
    https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/model.py
    """

    out_channels: int
    kernel_size: int
    activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu

    def __call__(
        self,
        x: jnp.ndarray,
        t: jnp.ndarray,
    ) -> jnp.ndarray:
        """Apply conv-norm-act, conv + time, norm, act with residual link.

        Args:
            x: input feature map, (batch, w, h, in_channels).
            t: time embedding, (batch, t_channels).

        Returns:
            Feature map with out_channels channels.
        """
        residual = x
        hidden = hk.Conv2D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(x)
        hidden = self.activation(instance_norm(hidden))
        hidden = hk.Conv2D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(hidden)
        # project time embedding to channel dim, broadcast over h and w
        t_emb = hk.Linear(output_size=self.out_channels)(
            self.activation(t[:, None, None, :])
        )
        hidden = instance_norm(hidden + t_emb)
        return self.activation(hidden + residual)
@dataclasses.dataclass
class Unet3dSliceTime(hk.Module):
    """2D UNet applied slice-wise to 3D images, with time conditioning.

    The depth axis is folded into the batch axis, so every depth slice is
    processed independently by a shared 2D UNet.

    https://github.com/Project-MONAI/MONAI/blob/dev/monai/networks/nets/basic_unet.py
    """

    in_shape: Tuple[int, int, int]  # spatial shape (h, w, d)
    in_channels: int  # input channels
    out_channels: int  # output channels
    num_channels: Tuple[int, ...]  # channel at each depth, including the bottom
    num_timesteps: int  # T
    kernel_size: int = 3
    scale_factor: int = 2  # spatial down-sampling/up-sampling
    remat: bool = False  # remat reduces memory cost at cost of compute speed

    def encoder(
        self,
        image: jnp.ndarray,
        t: jnp.ndarray,
    ) -> List[jnp.ndarray]:
        """Encode the image.

        Args:
            image: image tensor of shape (batch, H, W, in_channels).
            t: time embedding of shape (batch, t_channels).

        Returns:
            List of embeddings from each layer, finest resolution first,
            of length len(num_channels) + 1.
        """
        # stem: conv-norm-act followed by a time-conditioned residual block
        conv = Conv2dNormAct(
            out_channels=self.num_channels[0],
            kernel_size=self.kernel_size,
        )
        conv = hk.remat(conv) if self.remat else conv
        emb = conv(image)
        conv_t = TimeConv2dResBlock(
            out_channels=self.num_channels[0],
            kernel_size=self.kernel_size,
        )
        conv_t = hk.remat(conv_t) if self.remat else conv_t
        emb = conv_t(x=emb, t=t)
        embeddings = [emb]
        # one down-sampling stage per entry in num_channels
        for ch in self.num_channels:
            conv = Conv2dPool(out_channels=ch, scale_factor=self.scale_factor)
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            conv_t = TimeConv2dResBlock(
                out_channels=ch,
                kernel_size=self.kernel_size,
            )
            conv_t = hk.remat(conv_t) if self.remat else conv_t
            emb = conv_t(x=emb, t=t)
            embeddings.append(emb)
        return embeddings

    def decoder(
        self,
        embeddings: List[jnp.ndarray],
        t: jnp.ndarray,
    ) -> jnp.ndarray:
        """Decode the embedding and perform prediction.

        Args:
            embeddings: list of embeddings from each layer.
                Starting with the first layer.
            t: time embedding of shape (batch, t_channels).

        Returns:
            Unnormalized logits.

        Raises:
            ValueError: if the number of embeddings does not match configs.
        """
        if len(embeddings) != len(self.num_channels) + 1:
            raise ValueError("UNet decoder input length does not match")
        emb = embeddings[-1]
        # calculate up-sampled channel
        # [32, 64, 128, 256] -> [32, 32, 64, 128]
        channels = self.num_channels[:1] + self.num_channels[:-1]
        for i, ch in enumerate(channels[::-1]):
            # skipped.shape <= up-scaled shape
            # as padding may be added when down-sampling
            skipped = embeddings[-i - 2]
            skipped_shape = skipped.shape[-3:-1]
            # deconv and pad to make emb of same shape as skipped
            conv = hk.Conv2DTranspose(
                output_channels=ch,
                kernel_shape=self.scale_factor,
                stride=self.scale_factor,
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            # crop to the skip-connection shape
            emb = emb[
                ...,
                : skipped_shape[0],
                : skipped_shape[1],
                :,
            ]
            # add skipped
            emb += skipped
            # time-conditioned residual conv
            conv_t = TimeConv2dResBlock(
                out_channels=ch,
                kernel_size=self.kernel_size,
            )
            conv_t = hk.remat(conv_t) if self.remat else conv_t
            emb = conv_t(emb, t)
        # 1x1 conv head mapping features to output channels
        conv = hk.Conv2D(output_channels=self.out_channels, kernel_shape=1)
        conv = hk.remat(conv) if self.remat else conv
        out = conv(emb)
        return out

    def __call__(  # type: ignore[no-untyped-def]
        self,
        image: jnp.ndarray,
        t: jnp.ndarray,
        **kwargs,  # noqa: ARG002
    ) -> jnp.ndarray:
        """Forward pass.

        Args:
            image: (batch, h, w, d, in_channels).
            t: (batch, ).
            kwargs: unused arguments.

        Returns:
            Predictions (batch, h, w, d, out_channels).

        Raises:
            ValueError: if input shape does not match.
        """
        if image.shape[-4:] != (*self.in_shape, self.in_channels):
            raise ValueError(
                f"Input shape {image.shape[-4:]} does not match"
                f" configs {(*self.in_shape, self.in_channels)}"
            )
        # fold the depth axis into the batch axis so each slice is
        # processed independently by the 2D UNet
        # (batch, h, w, d, in_channels) -> (batch, d, h, w, in_channels)
        image = jnp.transpose(image, (0, 3, 1, 2, 4))
        # (batch, d, h, w, in_channels) -> (batch*d, h, w, in_channels)
        image = jnp.reshape(image, (-1, *self.in_shape[:2], self.in_channels))
        # repeat timestep per slice: (batch, ) -> (batch*d,)
        t = jnp.repeat(t, repeats=self.in_shape[2], axis=0)
        # sinusoidal embedding of dimension 4x the base channel count
        dim_t = self.num_channels[0] * 4
        t = sinusoidal_positional_embedding(x=t, dim=dim_t)
        embeddings = self.encoder(image=image, t=t)
        out = self.decoder(embeddings=embeddings, t=t)
        # unfold depth: (batch*d, h, w, out_channels)
        # -> (batch, d, h, w, out_channels)
        out = jnp.reshape(
            out, (-1, self.in_shape[2], *self.in_shape[:2], self.out_channels)
        )
        # (batch, d, h, w, out_channels) -> (batch, h, w, d, out_channels)
        out = jnp.transpose(out, (0, 2, 3, 1, 4))
        return out
| 7,105 | 31.153846 | 95 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/model/unet_3d_time.py | """UNet for segmentation."""
import dataclasses
from typing import Callable, List, Tuple
import haiku as hk
import jax
from jax import numpy as jnp
from imgx.model.basic import instance_norm, sinusoidal_positional_embedding
from imgx.model.unet_3d import Conv3dNormAct, Conv3dPool
@dataclasses.dataclass
class TimeConv3dResBlock(hk.Module):
    """Residual 3D conv block conditioned on a time embedding.

    Defined separately from Conv3dResBlock to support hk.remat, which
    does not allow conditional branching (if / else).
    https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/model.py
    """

    out_channels: int
    kernel_size: int
    activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu

    def __call__(
        self,
        x: jnp.ndarray,
        t: jnp.ndarray,
    ) -> jnp.ndarray:
        """Apply conv-norm-act, conv + time, norm, act with residual link.

        Args:
            x: input feature map, (batch, w, h, d, in_channels).
            t: time embedding, (batch, t_channels).

        Returns:
            Feature map with out_channels channels.
        """
        residual = x
        hidden = hk.Conv3D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(x)
        hidden = self.activation(instance_norm(hidden))
        hidden = hk.Conv3D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(hidden)
        # project time embedding to channel dim, broadcast over h, w, d
        t_emb = hk.Linear(output_size=self.out_channels)(
            self.activation(t[:, None, None, None, :])
        )
        hidden = instance_norm(hidden + t_emb)
        return self.activation(hidden + residual)
@dataclasses.dataclass
class Unet3dTime(hk.Module):
    """3D UNet with time conditioning.

    https://github.com/Project-MONAI/MONAI/blob/dev/monai/networks/nets/basic_unet.py
    """

    in_shape: Tuple[int, int, int]  # spatial shape (h, w, d)
    in_channels: int  # input channels
    out_channels: int  # output channels
    num_channels: Tuple[int, ...]  # channel at each depth, including the bottom
    num_timesteps: int  # T
    kernel_size: int = 3
    scale_factor: int = 2  # spatial down-sampling/up-sampling
    remat: bool = False  # remat reduces memory cost at cost of compute speed

    def encoder(
        self,
        image: jnp.ndarray,
        t: jnp.ndarray,
    ) -> List[jnp.ndarray]:
        """Encode the image.

        Args:
            image: image tensor of shape (batch, H, W, D, in_channels).
            t: time embedding of shape (batch, t_channels).

        Returns:
            List of embeddings from each layer, finest resolution first,
            of length len(num_channels) + 1.
        """
        # stem: conv-norm-act followed by a time-conditioned residual block
        conv = Conv3dNormAct(
            out_channels=self.num_channels[0],
            kernel_size=self.kernel_size,
        )
        conv = hk.remat(conv) if self.remat else conv
        emb = conv(image)
        conv_t = TimeConv3dResBlock(
            out_channels=self.num_channels[0],
            kernel_size=self.kernel_size,
        )
        conv_t = hk.remat(conv_t) if self.remat else conv_t
        emb = conv_t(x=emb, t=t)
        embeddings = [emb]
        # one down-sampling stage per entry in num_channels
        for ch in self.num_channels:
            conv = Conv3dPool(out_channels=ch, scale_factor=self.scale_factor)
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            conv_t = TimeConv3dResBlock(
                out_channels=ch,
                kernel_size=self.kernel_size,
            )
            conv_t = hk.remat(conv_t) if self.remat else conv_t
            emb = conv_t(x=emb, t=t)
            embeddings.append(emb)
        return embeddings

    def decoder(
        self,
        embeddings: List[jnp.ndarray],
        t: jnp.ndarray,
    ) -> jnp.ndarray:
        """Decode the embedding and perform prediction.

        Args:
            embeddings: list of embeddings from each layer.
                Starting with the first layer.
            t: time embedding of shape (batch, t_channels).

        Returns:
            Unnormalized logits.

        Raises:
            ValueError: if the number of embeddings does not match configs.
        """
        if len(embeddings) != len(self.num_channels) + 1:
            raise ValueError("UNet decoder input length does not match")
        emb = embeddings[-1]
        # calculate up-sampled channel
        # [32, 64, 128, 256] -> [32, 32, 64, 128]
        channels = self.num_channels[:1] + self.num_channels[:-1]
        for i, ch in enumerate(channels[::-1]):
            # skipped.shape <= up-scaled shape
            # as padding may be added when down-sampling
            skipped = embeddings[-i - 2]
            skipped_shape = skipped.shape[-4:-1]
            # deconv and pad to make emb of same shape as skipped
            conv = hk.Conv3DTranspose(
                output_channels=ch,
                kernel_shape=self.scale_factor,
                stride=self.scale_factor,
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            # crop to the skip-connection shape
            emb = emb[
                ...,
                : skipped_shape[0],
                : skipped_shape[1],
                : skipped_shape[2],
                :,
            ]
            # add skipped
            emb += skipped
            # time-conditioned residual conv
            conv_t = TimeConv3dResBlock(
                out_channels=ch,
                kernel_size=self.kernel_size,
            )
            conv_t = hk.remat(conv_t) if self.remat else conv_t
            emb = conv_t(emb, t)
        # 1x1x1 conv head mapping features to output channels
        conv = hk.Conv3D(output_channels=self.out_channels, kernel_shape=1)
        conv = hk.remat(conv) if self.remat else conv
        out = conv(emb)
        return out

    def __call__(  # type: ignore[no-untyped-def]
        self,
        image: jnp.ndarray,
        t: jnp.ndarray,
        **kwargs,  # noqa: ARG002
    ) -> jnp.ndarray:
        """Forward pass.

        Args:
            image: (batch, h, w, d, in_channels).
            t: (batch, ).
            kwargs: unused arguments.

        Returns:
            Predictions (batch, h, w, d, out_channels).

        Raises:
            ValueError: if input shape does not match.
        """
        if image.shape[-4:] != (*self.in_shape, self.in_channels):
            raise ValueError(
                f"Input shape {image.shape[-4:]} does not match"
                f" configs {(*self.in_shape, self.in_channels)}"
            )
        # sinusoidal embedding of dimension 4x the base channel count
        dim_t = self.num_channels[0] * 4
        t = sinusoidal_positional_embedding(x=t, dim=dim_t)
        embeddings = self.encoder(image=image, t=t)
        out = self.decoder(embeddings=embeddings, t=t)
        return out
| 6,438 | 30.10628 | 95 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/model/unet_3d_slice.py | """UNet for segmentation."""
import dataclasses
from typing import Callable, List, Tuple
import haiku as hk
import jax
from jax import numpy as jnp
from imgx.model.basic import instance_norm
@dataclasses.dataclass
class Conv2dNormAct(hk.Module):
    """Conv2D followed by instance normalization and activation."""

    out_channels: int
    kernel_size: int
    activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu

    def __call__(
        self,
        x: jnp.ndarray,
    ) -> jnp.ndarray:
        """Apply conv-norm-act.

        Args:
            x: input feature map, (batch, h, w, in_channels).

        Returns:
            Feature map with out_channels channels.
        """
        conv = hk.Conv2D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )
        return self.activation(instance_norm(conv(x)))
@dataclasses.dataclass
class Conv2dResBlock(hk.Module):
    """Residual block: two conv2d-norm-act layers with a skip connection."""

    out_channels: int
    kernel_size: int
    activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu

    def __call__(
        self,
        x: jnp.ndarray,
    ) -> jnp.ndarray:
        """Apply two conv-norm layers with a residual link.

        Args:
            x: input feature map, (batch, h, w, in_channels).

        Returns:
            Feature map with out_channels channels.
        """
        residual = x
        hidden = hk.Conv2D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(x)
        hidden = self.activation(instance_norm(hidden))
        hidden = hk.Conv2D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(hidden)
        hidden = instance_norm(hidden)
        return self.activation(hidden + residual)
@dataclasses.dataclass
class Conv2dPool(hk.Module):
    """Strided-conv down-sampling layer (patch merging)."""

    out_channels: int
    scale_factor: int

    def __call__(
        self,
        x: jnp.ndarray,
    ) -> jnp.ndarray:
        """Down-sample by scale_factor with a strided conv, then normalize.

        Args:
            x: input feature map, (batch, h, w, in_channels).

        Returns:
            Down-sampled array with out_channels channels.
        """
        pooled = hk.Conv2D(
            output_channels=self.out_channels,
            kernel_shape=self.scale_factor,
            stride=self.scale_factor,
            with_bias=False,
        )(x)
        return instance_norm(pooled)
@dataclasses.dataclass
class Unet3dSlice(hk.Module):
    """2D UNet applied slice-wise to 3D images.

    The depth axis is folded into the batch axis, so each depth slice is
    processed independently by a 2D UNet.

    https://github.com/Project-MONAI/MONAI/blob/dev/monai/networks/nets/basic_unet.py
    """

    in_shape: Tuple[int, int, int]  # spatial shape
    in_channels: int  # input channels
    out_channels: int  # output channels, e.g. number of classes
    num_channels: Tuple[int, ...]  # channel at each depth, including the bottom
    kernel_size: int = 3
    scale_factor: int = 2  # spatial down-sampling/up-sampling
    remat: bool = False  # remat reduces memory cost at cost of compute speed

    def encoder(
        self,
        image: jnp.ndarray,
    ) -> List[jnp.ndarray]:
        """Encode the image into multi-resolution embeddings.

        Args:
            image: image tensor of shape (batch, H, W, C).

        Returns:
            List of embeddings from each resolution, finest first;
            length is len(num_channels) + 1.
        """
        # Stem: change channels, then one residual block at full resolution.
        conv = hk.Sequential(
            [
                Conv2dNormAct(
                    out_channels=self.num_channels[0],
                    kernel_size=self.kernel_size,
                ),
                Conv2dResBlock(
                    out_channels=self.num_channels[0],
                    kernel_size=self.kernel_size,
                ),
            ]
        )
        conv = hk.remat(conv) if self.remat else conv
        emb = conv(image)
        embeddings = [emb]
        # One down-sampling stage per entry of num_channels.
        for ch in self.num_channels:
            conv = hk.Sequential(
                [
                    Conv2dPool(out_channels=ch, scale_factor=self.scale_factor),
                    Conv2dResBlock(
                        out_channels=ch,
                        kernel_size=self.kernel_size,
                    ),
                ]
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            embeddings.append(emb)
        return embeddings

    def decoder(
        self,
        embeddings: List[jnp.ndarray],
    ) -> jnp.ndarray:
        """Decode the embeddings and perform prediction.

        Args:
            embeddings: list of embeddings from each layer.
                Starting with the first layer.

        Returns:
            Unnormalized logits.

        Raises:
            ValueError: if the number of embeddings does not match the
                number of encoder stages.
        """
        if len(embeddings) != len(self.num_channels) + 1:
            raise ValueError("UNet decoder input length does not match")
        emb = embeddings[-1]
        # calculate up-sampled channel
        # [32, 64, 128, 256] -> [32, 32, 64, 128]
        channels = self.num_channels[:1] + self.num_channels[:-1]
        for i, ch in enumerate(channels[::-1]):
            # skipped.shape <= up-scaled shape
            # as padding may be added when down-sampling
            skipped = embeddings[-i - 2]
            skipped_shape = skipped.shape[-3:-1]
            # deconv and pad to make emb of same shape as skipped
            conv = hk.Conv2DTranspose(
                output_channels=ch,
                kernel_shape=self.scale_factor,
                stride=self.scale_factor,
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            # crop any excess introduced by the transposed convolution
            emb = emb[
                ...,
                : skipped_shape[0],
                : skipped_shape[1],
                :,
            ]
            # add skipped
            emb += skipped
            # conv
            conv = Conv2dResBlock(
                out_channels=ch,
                kernel_size=self.kernel_size,
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
        # 1x1 convolution produces per-class logits.
        conv = hk.Conv2D(output_channels=self.out_channels, kernel_shape=1)
        conv = hk.remat(conv) if self.remat else conv
        out = conv(emb)
        return out

    def __call__(  # type: ignore[no-untyped-def]
        self,
        image: jnp.ndarray,
        **kwargs,  # noqa: ARG002
    ) -> jnp.ndarray:
        """Forward pass.

        Args:
            image: (batch, h, w, d, in_channels).
            kwargs: unused arguments.

        Returns:
            Predictions (batch, h, w, d, out_channels).

        Raises:
            ValueError: if input shape does not match.
        """
        if image.shape[-4:] != (*self.in_shape, self.in_channels):
            # BUGFIX: message previously printed shape[-3:] although the
            # check above compares the last four dimensions.
            raise ValueError(
                f"Input shape {image.shape[-4:]} does not match"
                f" configs {(*self.in_shape, self.in_channels)}"
            )
        # (batch, h, w, d, in_channels) -> (batch, d, h, w, in_channels)
        image = jnp.transpose(image, (0, 3, 1, 2, 4))
        # (batch, d, h, w, in_channels) -> (batch*d, h, w, in_channels)
        image = jnp.reshape(image, (-1, *self.in_shape[:2], self.in_channels))
        embeddings = self.encoder(image=image)
        out = self.decoder(embeddings=embeddings)
        # (batch*d, h, w, out_channels) -> (batch, d, h, w, out_channels)
        out = jnp.reshape(
            out, (-1, self.in_shape[2], *self.in_shape[:2], self.out_channels)
        )
        # (batch, d, h, w, out_channels) -> (batch, h, w, d, out_channels)
        out = jnp.transpose(out, (0, 2, 3, 1, 4))
        return out
| 7,387 | 27.635659 | 85 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/model/__init__.py | """Package for models."""
from imgx.model.unet_3d import Unet3d # noqa: F401
from imgx.model.unet_3d_slice import Unet3dSlice # noqa: F401
from imgx.model.unet_3d_slice_time import Unet3dSliceTime # noqa: F401
from imgx.model.unet_3d_time import Unet3dTime # noqa: F401
# Class names of the vision backbones exposed by this package.
SUPPORTED_VISION_MODELS = [
    "Unet3d",
    "Unet3dSlice",
    "Unet3dTime",
    "Unet3dSliceTime",
]
# TODO uniform name
# Maps a model class name to the key used in experiment config files.
MODEL_CLS_NAME_TO_CONFIG_NAME = {
    "Unet3d": "unet3d",
    "Unet3dSlice": "unet3d_slice",
    "Unet3dTime": "unet3d_time",
    "Unet3dSliceTime": "unet3d_slice_time",
}
# Inverse mapping: config key -> model class name.
CONFIG_NAME_TO_MODEL_CLS_NAME = {
    v: k for k, v in MODEL_CLS_NAME_TO_CONFIG_NAME.items()
}
# Public API of the package mirrors the supported model class names.
__all__ = SUPPORTED_VISION_MODELS
| 701 | 27.08 | 71 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/model/unet_3d.py | """UNet for segmentation."""
import dataclasses
from typing import Callable, List, Tuple
import haiku as hk
import jax
from jax import numpy as jnp
from imgx.model.basic import instance_norm
@dataclasses.dataclass
class Conv3dNormAct(hk.Module):
    """Conv3D -> InstanceNorm -> activation block.

    Follows haiku's dataclass-module pattern: the fields configure the
    block and the layers are built inside ``__call__``.
    """

    # Number of output channels of the convolution.
    out_channels: int
    # Convolution kernel size (same along all three spatial axes).
    kernel_size: int
    # Activation applied after normalisation; GELU by default.
    activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu

    def __call__(
        self,
        x: jnp.ndarray,
    ) -> jnp.ndarray:
        """Apply convolution, instance norm, then activation.

        Args:
            x: channel-last input, e.g. (batch, h, w, d, channels).

        Returns:
            Tensor with the same spatial shape and out_channels channels.
        """
        # Bias is redundant: instance_norm creates a learnable offset.
        x = hk.Conv3D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(x)
        x = instance_norm(x)
        x = self.activation(x)
        return x
@dataclasses.dataclass
class Conv3dResBlock(hk.Module):
    """Two conv3d-norm-act layers with a residual connection.

    The residual is added before the final activation. The addition
    ``x + res`` assumes the input already has ``out_channels`` channels
    (or broadcasts) — callers in this file satisfy this by placing a
    channel-changing layer before the block.
    """

    # Number of output channels of both convolutions.
    out_channels: int
    # Convolution kernel size (same along all three spatial axes).
    kernel_size: int
    # Activation applied after each normalisation; GELU by default.
    activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu

    def __call__(
        self,
        x: jnp.ndarray,
    ) -> jnp.ndarray:
        """Apply conv-norm-act twice and add the residual.

        Args:
            x: channel-last input, e.g. (batch, h, w, d, channels).

        Returns:
            Tensor with the same spatial shape and out_channels channels.
        """
        res = x
        x = hk.Conv3D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(x)
        x = instance_norm(x)
        x = self.activation(x)
        x = hk.Conv3D(
            output_channels=self.out_channels,
            kernel_shape=self.kernel_size,
            with_bias=False,
        )(x)
        x = instance_norm(x)
        # Residual is added pre-activation of the final non-linearity.
        x = self.activation(x + res)
        return x
@dataclasses.dataclass
class Conv3dPool(hk.Module):
    """Strided-convolution down-sampling layer (patch merging)."""

    # Number of output channels of the convolution.
    out_channels: int
    # Both kernel size and stride, so spatial dims shrink by this factor.
    scale_factor: int

    def __call__(
        self,
        x: jnp.ndarray,
    ) -> jnp.ndarray:
        """Down-sample by scale_factor via a strided convolution.

        Args:
            x: shape (batch, h, w, d, in_channels).

        Returns:
            Down-sampled array with out_channels channels.
        """
        x = hk.Conv3D(
            output_channels=self.out_channels,
            kernel_shape=self.scale_factor,
            stride=self.scale_factor,
            with_bias=False,
        )(x)
        x = instance_norm(x)
        return x
@dataclasses.dataclass
class Unet3d(hk.Module):
    """3D UNet.

    https://github.com/Project-MONAI/MONAI/blob/dev/monai/networks/nets/basic_unet.py
    """

    in_shape: Tuple[int, int, int]  # spatial shape
    in_channels: int  # input channels
    out_channels: int  # output channels, e.g. number of classes
    num_channels: Tuple[int, ...]  # channel at each depth, including the bottom
    kernel_size: int = 3
    scale_factor: int = 2  # spatial down-sampling/up-sampling
    remat: bool = False  # remat reduces memory cost at cost of compute speed

    def encoder(
        self,
        image: jnp.ndarray,
    ) -> List[jnp.ndarray]:
        """Encode the image into multi-resolution embeddings.

        Args:
            image: image tensor of shape (batch, H, W, D, C).

        Returns:
            List of embeddings from each resolution, finest first;
            length is len(num_channels) + 1.
        """
        # Stem: change channels, then one residual block at full resolution.
        conv = hk.Sequential(
            [
                Conv3dNormAct(
                    out_channels=self.num_channels[0],
                    kernel_size=self.kernel_size,
                ),
                Conv3dResBlock(
                    out_channels=self.num_channels[0],
                    kernel_size=self.kernel_size,
                ),
            ]
        )
        conv = hk.remat(conv) if self.remat else conv
        emb = conv(image)
        embeddings = [emb]
        # One down-sampling stage per entry of num_channels.
        for ch in self.num_channels:
            conv = hk.Sequential(
                [
                    Conv3dPool(out_channels=ch, scale_factor=self.scale_factor),
                    Conv3dResBlock(
                        out_channels=ch,
                        kernel_size=self.kernel_size,
                    ),
                ]
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            embeddings.append(emb)
        return embeddings

    def decoder(
        self,
        embeddings: List[jnp.ndarray],
    ) -> jnp.ndarray:
        """Decode the embeddings and perform prediction.

        Args:
            embeddings: list of embeddings from each layer.
                Starting with the first layer.

        Returns:
            Unnormalized logits.

        Raises:
            ValueError: if the number of embeddings does not match the
                number of encoder stages.
        """
        if len(embeddings) != len(self.num_channels) + 1:
            raise ValueError("UNet decoder input length does not match")
        emb = embeddings[-1]
        # calculate up-sampled channel
        # [32, 64, 128, 256] -> [32, 32, 64, 128]
        channels = self.num_channels[:1] + self.num_channels[:-1]
        for i, ch in enumerate(channels[::-1]):
            # skipped.shape <= up-scaled shape
            # as padding may be added when down-sampling
            skipped = embeddings[-i - 2]
            skipped_shape = skipped.shape[-4:-1]
            # deconv and pad to make emb of same shape as skipped
            conv = hk.Conv3DTranspose(
                output_channels=ch,
                kernel_shape=self.scale_factor,
                stride=self.scale_factor,
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
            # crop any excess introduced by the transposed convolution
            emb = emb[
                ...,
                : skipped_shape[0],
                : skipped_shape[1],
                : skipped_shape[2],
                :,
            ]
            # add skipped
            emb += skipped
            # conv
            conv = Conv3dResBlock(
                out_channels=ch,
                kernel_size=self.kernel_size,
            )
            conv = hk.remat(conv) if self.remat else conv
            emb = conv(emb)
        # 1x1x1 convolution produces per-class logits.
        conv = hk.Conv3D(output_channels=self.out_channels, kernel_shape=1)
        conv = hk.remat(conv) if self.remat else conv
        out = conv(emb)
        return out

    def __call__(  # type: ignore[no-untyped-def]
        self,
        image: jnp.ndarray,
        **kwargs,  # noqa: ARG002
    ) -> jnp.ndarray:
        """Forward pass.

        Args:
            image: (batch, h, w, d, in_channels).
            kwargs: unused arguments.

        Returns:
            Predictions (batch, h, w, d, out_channels).

        Raises:
            ValueError: if input shape does not match.
        """
        if image.shape[-4:] != (*self.in_shape, self.in_channels):
            # BUGFIX: message previously printed shape[-3:] although the
            # check above compares the last four dimensions.
            raise ValueError(
                f"Input shape {image.shape[-4:]} does not match"
                f" configs {(*self.in_shape, self.in_channels)}"
            )
        embeddings = self.encoder(image=image)
        out = self.decoder(embeddings=embeddings)
        return out
| 6,811 | 26.691057 | 85 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/model/basic.py | """Basic functions and modules."""
import haiku as hk
from jax import numpy as jnp
def layer_norm(x: jnp.ndarray) -> jnp.ndarray:
    """Normalise the last axis of x with learnable scale and offset.

    Args:
        x: input array.

    Returns:
        Layer-normalised array of the same shape.
    """
    norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)
    return norm(x)
def instance_norm(x: jnp.ndarray) -> jnp.ndarray:
    """Apply haiku InstanceNorm with learnable scale and offset.

    Args:
        x: input array.

    Returns:
        Instance-normalised array of the same shape.
    """
    norm = hk.InstanceNorm(create_scale=True, create_offset=True)
    return norm(x)
def dropout(x: jnp.ndarray, dropout_rate: float) -> jnp.ndarray:
    """Apply dropout, skipping the RNG entirely when the rate is zero.

    Calling hk.dropout unconditionally would demand a PRNG key even when
    no randomness is used, raising:
        You must pass a non-None PRNGKey to init and/or apply
        if you make use of random numbers.
    so the zero-rate case returns the input untouched.

    Args:
        x: input array.
        dropout_rate: probability of zeroing each element.

    Returns:
        x unchanged when dropout_rate is 0, dropped-out x otherwise.
    """
    if dropout_rate != 0.0:  # noqa: PLR2004
        return hk.dropout(hk.next_rng_key(), dropout_rate, x)
    return x
def sinusoidal_positional_embedding(
    x: jnp.ndarray, dim: int, max_period: int = 10000
) -> jnp.ndarray:
    """Compute sinusoidal embeddings for scalar positions.

    The first half of the output holds cosine features and the second
    half sine features. For i = 0, ..., dim/2 - 1 the frequency is
    1 / max_period ** (i / (dim/2)), i.e. the features are
        cos(x / max_period ** (i / (dim/2)))
        sin(x / max_period ** (i / (dim/2)))

    Args:
        x: positions of shape (batch,), with non-negative values.
        dim: embedding dimension, assumed to be even.
        max_period: controls the minimum frequency of the embeddings.

    Returns:
        Embedding of shape (batch, dim).
    """
    half = dim // 2
    # Geometric ladder of inverse frequencies, shape (half,).
    inv_freq = jnp.exp(
        -jnp.log(max_period) * jnp.arange(0, half, dtype=jnp.float32) / half
    )
    # Outer product of positions and frequencies: (batch, half).
    phase = x[:, None] * inv_freq[None, :]
    # (batch, dim): cosine features first, then sine.
    return jnp.concatenate([jnp.cos(phase), jnp.sin(phase)], axis=-1)
| 2,088 | 26.486842 | 74 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/train_state.py | """Training state and checkpoints."""
import pickle
from pathlib import Path
from typing import Optional, Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jmp
import numpy as np
import optax
from imgx.device import broadcast_to_local_devices, get_first_replica_values
# TrainState fields that hold checkpointable array trees.
# NOTE(review): not referenced in this module; presumably consumed by
# external checkpoint tooling — verify before removing.
CHECKPOINT_ATTRS = [
    "params",
    "network_state",
    "opt_state",
    "ema_network_state",
    "ema_params",
]
@chex.dataclass
class TrainState:
    """Dataclass to keep track of state of training.

    The state of training is structured as a chex.dataclass, which enables
    instances of this class to be passed into jax transformations like
    tree_map and pmap.

    The stored values are broadcast across devices.
    """

    params: hk.Params  # model parameters
    network_state: hk.State  # mutable network state (e.g. norm statistics)
    opt_state: optax.OptState  # optimizer state
    loss_scale: jmp.LossScale  # loss scaling for mixed precision
    global_step: jnp.array  # number of update steps taken so far
    rng: jax.random.PRNGKey  # random key threaded through training steps
    ema_params: Optional[hk.Params] = None  # EMA copy of params, if enabled
    ema_network_state: Optional[hk.State] = None  # EMA copy of network state
def save_array_tree(ckpt_dir: Path, state: "chex.ArrayTree") -> None:
    """Save a pytree with its leaf arrays and structure stored separately.

    Leaves are appended to ``arrays.npy`` in ``tree_leaves`` order, and the
    tree structure (with dummy integer leaves) is pickled to ``tree.pkl``.
    ``restore_array_tree`` reverses the process.

    Args:
        ckpt_dir: directory to save into; created if missing.
        state: pytree of arrays to save, e.g. params and optimizer state.
    """
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    with open(ckpt_dir / "arrays.npy", "wb") as f:
        for x in jax.tree_util.tree_leaves(state):
            np.save(f, x, allow_pickle=False)
    # Replace every leaf by 0 so only the structure gets pickled.
    # Uses jax.tree_util.tree_map (consistent with tree_leaves above);
    # the bare jax.tree_map alias is deprecated and removed in recent jax.
    tree_struct = jax.tree_util.tree_map(lambda _: 0, state)
    with open(ckpt_dir / "tree.pkl", "wb") as f:
        pickle.dump(tree_struct, f)
def restore_array_tree(ckpt_dir: Path) -> chex.ArrayTree:
    """Rebuild a pytree previously written by ``save_array_tree``.

    Args:
        ckpt_dir: directory containing ``tree.pkl`` and ``arrays.npy``.

    Returns:
        Restored pytree, e.g. params and optimizer state.
    """
    # NOTE: pickle can execute arbitrary code while loading; only restore
    # checkpoints from trusted sources.
    tree_struct = pickle.loads((ckpt_dir / "tree.pkl").read_bytes())
    leaves, treedef = jax.tree_util.tree_flatten(tree_struct)
    # Arrays were appended to a single file in tree_leaves order, so read
    # them back sequentially, one np.load per leaf.
    with (ckpt_dir / "arrays.npy").open("rb") as array_file:
        flat_state = [np.load(array_file) for _ in range(len(leaves))]
    return jax.tree_util.tree_unflatten(treedef, flat_state)
def save_ckpt(train_state: TrainState, ckpt_dir: Path) -> None:
    """Save a replicated train state as a checkpoint.

    Only the first replica's values are stored. The jmp loss-scale object
    is not an array pytree, so its class name and fields are flattened
    into the state dict here and rebuilt by ``restore_ckpt``.

    Args:
        train_state: replicated training state to save.
        ckpt_dir: directory to save into.
    """
    # De-replicate: keep device 0's copy of every array.
    # Uses jax.tree_util.tree_map; the bare jax.tree_map alias is
    # deprecated and removed in recent jax releases.
    train_state = jax.tree_util.tree_map(get_first_replica_values, train_state)
    state_dict = dict(train_state)  # type: ignore[call-overload]
    # loss_scale needs to be stored differently
    loss_scale = state_dict["loss_scale"]
    del state_dict["loss_scale"]
    loss_scale_type = loss_scale.__class__.__name__
    state_dict["loss_scale_type"] = loss_scale_type
    if isinstance(loss_scale, jmp.StaticLossScale):
        state_dict["loss_scale"] = loss_scale.loss_scale
    elif isinstance(loss_scale, jmp.DynamicLossScale):
        # Dynamic scaling also tracks a counter and adjustment schedule.
        state_dict["loss_scale"] = loss_scale.loss_scale
        state_dict["loss_scale_counter"] = loss_scale.counter
        state_dict["loss_scale_period"] = loss_scale.period
        state_dict["loss_scale_factor"] = loss_scale.factor
    save_array_tree(ckpt_dir=ckpt_dir, state=state_dict)
def restore_ckpt(ckpt_dir: Path) -> TrainState:
    """Restore a train state from files written by ``save_ckpt``.

    The jmp loss-scale object is rebuilt from its type name and the
    flattened fields stored alongside the arrays.

    Args:
        ckpt_dir: directory to load.

    Returns:
        Restored train state, broadcast to local devices.

    Raises:
        ValueError: if the stored loss_scale type is unknown.
    """
    state_dict = restore_array_tree(ckpt_dir)
    # loss_scale needs to be loaded differently
    loss_scale_type = state_dict["loss_scale_type"]
    del state_dict["loss_scale_type"]
    if loss_scale_type == "NoOpLossScale":
        loss_scale = jmp.NoOpLossScale()
    elif loss_scale_type == "StaticLossScale":
        loss_scale = state_dict["loss_scale"]
        del state_dict["loss_scale"]
        loss_scale = jmp.StaticLossScale(loss_scale)
    elif loss_scale_type == "DynamicLossScale":
        loss_scale = state_dict["loss_scale"]
        counter = state_dict["loss_scale_counter"]
        # factor and period are ints not arrays
        period = int(state_dict["loss_scale_period"])
        factor = int(state_dict["loss_scale_factor"])
        del state_dict["loss_scale"]
        del state_dict["loss_scale_counter"]
        del state_dict["loss_scale_period"]
        del state_dict["loss_scale_factor"]
        loss_scale = jmp.DynamicLossScale(
            loss_scale=loss_scale, counter=counter, period=period, factor=factor
        )
    else:
        raise ValueError(f"Unknown loss_scale type {loss_scale_type}.")
    # TODO should consider shards
    # Uses jax.tree_util.tree_map; the bare jax.tree_map alias is
    # deprecated and removed in recent jax releases.
    state_dict = jax.tree_util.tree_map(broadcast_to_local_devices, state_dict)
    train_state = TrainState(  # type: ignore[call-arg]
        params=state_dict["params"],
        network_state=state_dict["network_state"],
        opt_state=state_dict["opt_state"],
        loss_scale=loss_scale,
        global_step=state_dict["global_step"],
        rng=state_dict["rng"],
        ema_params=state_dict.get("ema_params", None),
        ema_network_state=state_dict.get("ema_network_state", None),
    )
    return train_state
def get_eval_params_and_state(
    train_state: TrainState,
) -> Tuple[hk.Params, hk.State]:
    """Pick the parameters and state used for evaluation.

    EMA weights are preferred when available; values are reduced to the
    first replica's copy.

    Args:
        train_state: Train State.

    Returns:
        Tuple of (params, network state).
    """
    use_ema = train_state.ema_params is not None
    params = train_state.ema_params if use_ema else train_state.params
    state = (
        train_state.ema_network_state if use_ema else train_state.network_state
    )
    return get_first_replica_values(params), get_first_replica_values(state)
def get_eval_params_and_state_from_ckpt(
    ckpt_dir: Path,
    use_ema: bool,
) -> Tuple[hk.Params, hk.State]:
    """Load evaluation parameters and state from a checkpoint.

    Args:
        ckpt_dir: directory to load.
        use_ema: if True, load the EMA copies instead of the raw weights.

    Returns:
        Broadcast params, state.
    """
    state_dict = restore_array_tree(ckpt_dir)
    if use_ema:
        params = state_dict["ema_params"]
        state = state_dict["ema_network_state"]
    else:
        params = state_dict["params"]
        state = state_dict["network_state"]
    # make sure arrays are initialised in CPU
    # Uses jax.tree_util.tree_map; the bare jax.tree_map alias is
    # deprecated and removed in recent jax releases.
    with jax.default_device(jax.devices("cpu")[0]):
        params = jax.tree_util.tree_map(jnp.asarray, params)
        state = jax.tree_util.tree_map(jnp.asarray, state)
    # broadcast to other devices
    params = broadcast_to_local_devices(params)
    state = broadcast_to_local_devices(state)
    return params, state
| 6,560 | 30.695652 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/optim.py | """Module for optimization."""
import logging
from typing import Tuple
import jax
import jax.numpy as jnp
import optax
from omegaconf import DictConfig
def ema_update(
    ema_value: jnp.ndarray,
    current_value: jnp.ndarray,
    decay: float,
    step: jnp.ndarray,
) -> jnp.ndarray:
    """Exponential moving average with TF1-style decay warm-up.

    The effective decay is capped at (1 + step) / (10 + step), so early
    in training the average tracks the raw value closely before settling
    to the configured decay.

    Args:
        ema_value: previous value.
        current_value: current value.
        decay: target decay ratio.
        step: number of steps so far.

    Returns:
        Updated moving average.
    """
    warmup = (1.0 + step) / (10.0 + step)
    effective_decay = jnp.minimum(decay, warmup)
    return ema_value * effective_decay + current_value * (1 - effective_decay)
def get_lr_schedule(config: DictConfig) -> optax.Schedule:
    """Build the learning-rate schedule from config.

    Args:
        config: entire configuration; ``config.optimizer.lr_schedule`` is
            unpacked as keyword arguments of
            ``optax.warmup_cosine_decay_schedule``.

    Returns:
        optax schedule mapping step count to learning rate.
    """
    return optax.warmup_cosine_decay_schedule(**config.optimizer.lr_schedule)
def get_every_k_schedule(config: DictConfig) -> int:
    """Compute k for gradient accumulation.

    Args:
        config: entire configuration.

    Returns:
        k, where gradients are accumulated every k steps.

    Raises:
        ValueError: if the requested batch size is smaller than, or not a
            multiple of, the batch size processed per step.
    """
    devices_per_replica = config.training.num_devices_per_replica
    per_replica_batch = config.training.batch_size_per_replica
    num_replicas = jax.local_device_count() // devices_per_replica
    step_batch = per_replica_batch * num_replicas
    if config.training.batch_size < step_batch:
        raise ValueError(
            f"Batch size {config.training.batch_size} is too small. "
            f"batch_size_per_replica * num_replicas = "
            f"{per_replica_batch} * {num_replicas} = "
            f"{step_batch}."
        )
    if config.training.batch_size % step_batch != 0:
        raise ValueError(
            "Batch size cannot be evenly divided by batch size per step."
        )
    every_k = config.training.batch_size // step_batch
    if every_k > 1:
        logging.info(
            f"Using gradient accumulation. "
            f"Each model duplicate is stored across {devices_per_replica} "
            f"shard{'s' if devices_per_replica > 1 else ''}. "
            f"Each step has {step_batch} samples. "
            f"Gradients are averaged every {every_k} steps. "
            f"Effective batch size is {config.training.batch_size}."
        )
    return every_k
def init_optimizer(
    config: DictConfig,
) -> Tuple[optax.GradientTransformation, int]:
    """Build the optimizer, optionally wrapped for gradient accumulation.

    Args:
        config: entire configuration.

    Returns:
        Tuple of (optimizer, every_k_schedule).
    """
    lr_schedule = get_lr_schedule(config)
    # Clip global gradient norm, then apply the configured optax optimizer.
    base_optimizer = optax.chain(
        optax.clip_by_global_norm(config.optimizer.grad_norm),
        getattr(optax, config.optimizer.name)(
            learning_rate=lr_schedule, **config.optimizer.kwargs
        ),
    )
    # accumulate gradient when needed
    every_k_schedule = get_every_k_schedule(config)
    if every_k_schedule == 1:
        # No accumulation: one optimizer update per gradient step.
        return base_optimizer, every_k_schedule
    return (
        optax.MultiSteps(base_optimizer, every_k_schedule=every_k_schedule),
        every_k_schedule,
    )
| 3,308 | 30.216981 | 79 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/experiment.py | """Module for launching experiments."""
import logging
from functools import partial
from pathlib import Path
from typing import Callable, Dict, Mapping, Optional, Tuple, Union
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jmp
import optax
import tensorflow as tf
from omegaconf import DictConfig
from imgx import IMAGE, REPLICA_AXIS, TEST_SPLIT, VALID_SPLIT
from imgx.datasets import IMAGE_SHAPE_MAP
from imgx.datasets.augmentation import build_aug_fn_from_config
from imgx.datasets.iterator import get_image_tfds_dataset
from imgx.datasets.util import py_prefetch
from imgx.device import (
bind_rng_to_host_or_device,
broadcast_to_local_devices,
get_first_replica_values,
is_tpu,
)
from imgx.exp.eval import build_batch_eval_fn, build_dataset_eval_fn
from imgx.exp.loss import build_loss_fn
from imgx.exp.mixed_precision import get_mixed_precision_policy, select_tree
from imgx.exp.optim import ema_update, get_lr_schedule, init_optimizer
from imgx.exp.train_state import TrainState
def init_train_state(
    batch: chex.ArrayTree,
    rng: jax.random.PRNGKey,
    loss_init: Callable,
    config: DictConfig,
) -> TrainState:
    """Initialize train_state.

    Args:
        batch: a batch example.
        rng: random key.
        loss_init: init function of loss.
        config: entire configuration.

    Returns:
        Freshly initialised training state (single copy, not replicated).
    """
    config_mp = config.training.mixed_precision
    config_ema = config.training.ema
    # init network
    rng, train_rng = jax.random.split(rng)
    params, network_state = loss_init(rng, batch)
    # EMA copies start equal to the raw weights when EMA is enabled.
    ema_params = params if config_ema.use else None
    ema_network_state = network_state if config_ema.use else None
    # count params on one device
    params_count = sum(x.size for x in jax.tree_util.tree_leaves(params))
    logging.info(f"The model has {params_count:,} parameters.")
    # init optimizer state
    optimizer, _ = init_optimizer(config=config)
    opt_state = optimizer.init(params)
    # init loss_scale
    # it is necessary to use NoOpLossScale even not intended to use mp
    # otherwise, some unknown default policy may be used
    # resulted in non-converging losses and nans
    loss_scale = jmp.NoOpLossScale()
    if config_mp.use and (not is_tpu()):
        # no need to scale on TPU
        # https://cloud.google.com/tpu/docs/bfloat16
        scale = jmp.half_dtype()(2**15)
        loss_scale = jmp.DynamicLossScale(scale)
    global_step = jnp.array(0, dtype=jnp.int32)
    return TrainState(  # type: ignore[call-arg]
        params=params,
        network_state=network_state,
        opt_state=opt_state,
        loss_scale=loss_scale,
        global_step=global_step,
        rng=train_rng,
        ema_params=ema_params,
        ema_network_state=ema_network_state,
    )
def update_parameters(
    train_state: TrainState,
    batch: Mapping[str, chex.ArrayTree],
    loss_apply: Callable,
    config: DictConfig,
) -> Tuple[TrainState, chex.ArrayTree]:
    """Updates parameters with one (possibly accumulated) gradient step.

    Runs inside pmap: gradients and metrics are averaged across replicas.

    Mixed precision references:
    - https://github.com/deepmind/jmp
    - https://github.com/deepmind/dm-haiku/blob/main/examples/imagenet

    Args:
        train_state: training state.
        batch: training data.
        loss_apply: apply of loss function.
        config: entire configuration.

    Returns:
        train_state: training state.
        scalars: metric dict.
    """

    def loss_fn(
        params: hk.Params,
        network_state: hk.State,
        loss_scale: jmp.LossScale,
        rng_key: jax.random.PRNGKey,
        batch_data: chex.ArrayTree,
    ) -> Tuple[chex.ArrayTree, Tuple[chex.ArrayTree, hk.State]]:
        """Regroup loss output.

        Args:
            params: network parameters.
            network_state: network state.
            loss_scale: scale loss for mixed precision.
            rng_key: random key.
            batch_data: data of a batch.

        Returns:
            - loss
            - (metric dict, model state)
        """
        (loss, batch_scalars), network_state = loss_apply(
            params, network_state, rng_key, batch_data
        )
        return loss_scale.scale(loss), (batch_scalars, network_state)

    config_mp = config.training.mixed_precision
    config_ema = config.training.ema
    aug_fn = build_aug_fn_from_config(config)
    # get random key for the step
    rng, step_rng = jax.random.split(train_state.rng)
    aug_rng, step_rng = jax.random.split(step_rng)
    aug_rng = bind_rng_to_host_or_device(
        aug_rng, bind_to="device", axis_name=REPLICA_AXIS
    )
    step_rng = bind_rng_to_host_or_device(
        step_rng, bind_to="device", axis_name=REPLICA_AXIS
    )
    # data augmentation
    batch = aug_fn(aug_rng, batch)
    # gradient calculation
    grad_loss_fn = jax.grad(loss_fn, has_aux=True)
    grads, (scalars, updated_network_state) = grad_loss_fn(
        train_state.params,
        train_state.network_state,
        train_state.loss_scale,
        step_rng,
        batch,
    )
    scalars["grad_norm_before_pmean"] = optax.global_norm(grads)
    scalars["params_norm"] = optax.global_norm(train_state.params)
    # grads are in "param_dtype" (likely float32)
    # cast them back to compute dtype such that
    # we do the all-reduce below in the compute precision
    # which is typically lower than the param precision
    policy = get_mixed_precision_policy(config_mp.use)
    grads = policy.cast_to_compute(grads)
    grads = train_state.loss_scale.unscale(grads)
    # take the mean across all replicas to keep params in sync
    grads = jax.lax.pmean(grads, axis_name=REPLICA_AXIS)
    # compute our optimizer update in the same precision as params
    grads = policy.cast_to_param(grads)
    # update parameters
    optimizer, every_k_schedule = init_optimizer(config=config)
    updates, updated_opt_state = optimizer.update(
        grads, train_state.opt_state, train_state.params
    )
    updated_params = optax.apply_updates(train_state.params, updates)
    # global_step counts gradient steps; divide to get optimizer steps.
    scalars["lr"] = get_lr_schedule(config)(
        train_state.global_step // every_k_schedule
    )
    scalars["grad_norm"] = optax.global_norm(grads)
    scalars["grad_update_norm"] = optax.global_norm(updates)
    grads_finite = jmp.all_finite(grads)
    updated_loss_scale = train_state.loss_scale.adjust(grads_finite)
    # mixed precision or not, skip non-finite gradients
    (updated_params, updated_network_state, updated_opt_state) = select_tree(
        grads_finite,
        (updated_params, updated_network_state, updated_opt_state),
        (
            train_state.params,
            train_state.network_state,
            train_state.opt_state,
        ),
    )
    scalars["loss_scale"] = updated_loss_scale.loss_scale
    # average metrics across replicas
    # min_/max_ prefixed metrics get pmin/pmax, everything else pmean.
    min_scalars = {}
    max_scalars = {}
    mean_scalars = {}
    for k in scalars:
        if k.startswith("min_"):
            min_scalars[k] = scalars[k]
        elif k.startswith("max_"):
            max_scalars[k] = scalars[k]
        else:
            mean_scalars[k] = scalars[k]
    min_scalars = jax.lax.pmin(min_scalars, axis_name=REPLICA_AXIS)
    max_scalars = jax.lax.pmax(max_scalars, axis_name=REPLICA_AXIS)
    mean_scalars = jax.lax.pmean(mean_scalars, axis_name=REPLICA_AXIS)
    scalars = {
        **min_scalars,
        **max_scalars,
        **mean_scalars,
    }
    # update train_state
    train_state = train_state.replace(
        params=updated_params,
        network_state=updated_network_state,
        opt_state=updated_opt_state,
        loss_scale=updated_loss_scale,
        rng=rng,
    )
    if train_state.ema_params is not None:
        ema = partial(
            ema_update,
            decay=config_ema.decay,
            step=train_state.global_step,
        )
        # Uses jax.tree_util.tree_map; the bare jax.tree_map alias is
        # deprecated and removed in recent jax releases.
        ema_params = jax.tree_util.tree_map(
            ema, train_state.ema_params, updated_params
        )
        ema_network_state = jax.tree_util.tree_map(
            ema, train_state.ema_network_state, updated_network_state
        )
        train_state = train_state.replace(
            ema_params=ema_params,
            ema_network_state=ema_network_state,
        )
    train_state = train_state.replace(
        global_step=train_state.global_step + 1,
    )
    return train_state, scalars
def batch_eval(
    batch: Mapping[str, chex.ArrayTree],
    config: DictConfig,
) -> Tuple[chex.ArrayTree, chex.ArrayTree]:
    """Compute metrics and predictions for one batch, without loss.

    Args:
        batch: input batch data.
        config: entire configuration.

    Returns:
        - metrics.
        - prediction.
    """
    # Build the evaluation function from config and apply it immediately.
    return build_batch_eval_fn(config=config)(batch)
class Experiment:
    """Experiment for supervised training."""

    def __init__(self, config: DictConfig) -> None:
        """Initializes experiment.

        Args:
            config: experiment config.
        """
        # Do not use accelerators in data pipeline.
        tf.config.experimental.set_visible_devices([], device_type="GPU")
        tf.config.experimental.set_visible_devices([], device_type="TPU")
        # save args
        self.config = config
        # init data loaders and networks
        self.dataset = get_image_tfds_dataset(
            dataset_name=self.config.data.name,
            config=self.config,
        )
        # Prefetch batches on a background thread to hide I/O latency.
        self.train_iter = py_prefetch(lambda: self.dataset.train_iter)
        self.valid_iter = py_prefetch(lambda: self.dataset.valid_iter)
        self.test_iter = py_prefetch(lambda: self.dataset.test_iter)

    def train_init(self) -> TrainState:
        """Initialize data loader, loss, networks for training.

        Also defines ``self.update_params_pmap`` used by ``train_step``.

        Returns:
            initialized training state.
        """
        # init loss
        loss = hk.transform_with_state(build_loss_fn(config=self.config))
        # the batch is for multi-devices
        # (num_models, ...)
        # num_models is not the same as num_devices_per_replica
        batch = next(self.train_iter)
        batch = get_first_replica_values(batch)
        # check image size
        data_config = self.config["data"]
        image_shape = IMAGE_SHAPE_MAP[data_config["name"]]
        chex.assert_equal(batch[IMAGE].shape[1:4], image_shape)
        aug_fn = build_aug_fn_from_config(self.config)
        aug_rng = jax.random.PRNGKey(self.config["seed"])
        batch = aug_fn(aug_rng, batch)
        # init train state on cpu first
        rng = jax.random.PRNGKey(self.config.seed)
        train_state = jax.jit(
            partial(init_train_state, loss_init=loss.init, config=self.config),
            backend="cpu",
        )(
            batch=batch,
            rng=rng,
        )
        # then broadcast train_state to devices
        train_state = broadcast_to_local_devices(train_state)
        # define pmap-ed update func
        self.update_params_pmap = jax.pmap(
            partial(
                update_parameters,
                loss_apply=loss.apply,
                config=self.config,
            ),
            axis_name=REPLICA_AXIS,
            donate_argnums=(0,),
        )
        return train_state

    def train_step(
        self,
        train_state: TrainState,
    ) -> Tuple[TrainState, chex.ArrayTree]:
        """Training step.

        Args:
            train_state: training state.

        Returns:
            - updated train_state.
            - metric dict.
        """
        batch = next(self.train_iter)
        train_state, scalars = self.update_params_pmap(
            train_state,
            batch,
        )
        scalars = get_first_replica_values(scalars)
        # tensor to python values
        # jax.tree_util.tree_map replaces the deprecated jax.tree_map alias.
        scalars = jax.tree_util.tree_map(lambda x: x.item(), scalars)
        return train_state, scalars

    def eval_init(self) -> None:
        """Initialize data loader, loss, networks for validation."""
        evaluate = hk.transform_with_state(
            partial(batch_eval, config=self.config)
        )
        self.evaluate_pmap = jax.pmap(
            evaluate.apply,
            axis_name=REPLICA_AXIS,
        )
        self.eval_dataset = build_dataset_eval_fn(self.config)

    def eval_step(
        self,
        split: str,
        params: hk.Params,
        state: hk.State,
        rng: jax.random.PRNGKey,
        out_dir: Optional[Path],
        save_predictions: bool,
    ) -> Dict:
        """Validation step on entire validation data set.

        Args:
            split: data split.
            params: network parameters.
            state: network state.
            rng: random key.
            out_dir: output directory to save metrics and predictions,
                if None, no files will be saved.
            save_predictions: if True, save predicted masks.

        Returns:
            metric dict.

        Raises:
            ValueError: if split is not supported.
        """
        if split not in [VALID_SPLIT, TEST_SPLIT]:
            raise ValueError(
                "Evaluation can only be performed on valid and test splits."
            )
        if split == VALID_SPLIT:
            batch_iterator = self.valid_iter
            num_steps = self.dataset.num_valid_steps
        else:
            batch_iterator = self.test_iter
            num_steps = self.dataset.num_test_steps
        if out_dir is not None:
            out_dir.mkdir(parents=True, exist_ok=True)
        return self.eval_dataset(
            evaluate_pmap=self.evaluate_pmap,
            params=params,
            state=state,
            rng=rng,
            batch_iterator=batch_iterator,
            num_steps=num_steps,
            out_dir=out_dir,
            save_predictions=save_predictions,
        )
| 13,642 | 30.801865 | 79 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/loss.py | """Module for building models and losses."""
from typing import Callable, Dict, Tuple
import haiku as hk
import jax
import jax.numpy as jnp
from omegaconf import DictConfig
from imgx import IMAGE, LABEL
from imgx.datasets import NUM_CLASSES_MAP
from imgx.diffusion.gaussian_diffusion import (
DiffusionModelOutputType,
DiffusionModelVarianceType,
GaussianDiffusion,
)
from imgx.exp.mixed_precision import set_mixed_precision_policy
from imgx.exp.model import build_diffusion_model, build_vision_model
from imgx.loss import mean_cross_entropy, mean_focal_loss
from imgx.loss.dice import (
dice_loss,
mean_with_background,
mean_without_background,
)
from imgx.metric import class_proportion
def segmentation_loss_with_aux(
    logits: jnp.ndarray,
    mask_true: jnp.ndarray,
    loss_config: DictConfig,
) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
    """Calculate segmentation loss with auxiliary losses and return metrics.

    Args:
        logits: unnormalised logits of shape (batch, ..., num_classes).
        mask_true: one hot label of shape (batch, ..., num_classes).
        loss_config: have weights of diff losses.

    Returns:
        - calculated loss.
        - metrics.
    """
    scalars = {}
    # Dice
    # (batch, num_classes)
    dice_loss_batch_cls = dice_loss(
        logits=logits,
        mask_true=mask_true,
    )
    # (1,) scalar used for the optimised loss; whether the background class
    # contributes is a config option, hence the lax.cond between reductions
    dice_loss_scalar = jax.lax.cond(
        loss_config["dice_include_background"],
        mean_with_background,
        mean_without_background,
        dice_loss_batch_cls,
    )
    # metrics; nan-aware reductions since a class may be absent in the batch
    for i in range(dice_loss_batch_cls.shape[-1]):
        scalars[f"mean_dice_loss_class_{i}"] = jnp.nanmean(
            dice_loss_batch_cls[:, i]
        )
        scalars[f"min_dice_loss_class_{i}"] = jnp.nanmin(
            dice_loss_batch_cls[:, i]
        )
        scalars[f"max_dice_loss_class_{i}"] = jnp.nanmax(
            dice_loss_batch_cls[:, i]
        )
    # NOTE: this reported metric always averages over all classes including
    # background, independent of "dice_include_background" which only affects
    # the optimised dice_loss_scalar above. (A previous assignment of
    # dice_loss_scalar to this key was dead code, overwritten here.)
    scalars["mean_dice_loss"] = jnp.nanmean(dice_loss_batch_cls)
    scalars["min_dice_loss"] = jnp.nanmin(dice_loss_batch_cls)
    scalars["max_dice_loss"] = jnp.nanmax(dice_loss_batch_cls)
    # cross entropy
    ce_loss_scalar = mean_cross_entropy(
        logits=logits,
        mask_true=mask_true,
    )
    scalars["mean_cross_entropy_loss"] = ce_loss_scalar
    # focal loss
    focal_loss_scalar = mean_focal_loss(
        logits=logits,
        mask_true=mask_true,
    )
    scalars["mean_focal_loss"] = focal_loss_scalar
    # total loss: weighted sum of enabled components
    loss_scalar = 0
    if loss_config["dice"] > 0:
        loss_scalar += dice_loss_scalar * loss_config["dice"]
    if loss_config["cross_entropy"] > 0:
        loss_scalar += ce_loss_scalar * loss_config["cross_entropy"]
    if loss_config["focal"] > 0:
        loss_scalar += focal_loss_scalar * loss_config["focal"]
    # class proportion
    # (batch, num_classes)
    cls_prop = class_proportion(mask_true)
    for i in range(dice_loss_batch_cls.shape[-1]):
        scalars[f"mean_proportion_class_{i}"] = jnp.nanmean(cls_prop[:, i])
        scalars[f"min_proportion_class_{i}"] = jnp.nanmin(cls_prop[:, i])
        scalars[f"max_proportion_class_{i}"] = jnp.nanmax(cls_prop[:, i])
    return loss_scalar, scalars
def segmentation_loss(
    input_dict: Dict[str, jnp.ndarray],
    model: hk.Module,
    num_classes: int,
    loss_config: DictConfig,
) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
    """Compute the supervised segmentation loss for one batch.

    Args:
        input_dict: input data having image and label.
        model: network instance.
        num_classes: number of classes including background.
        loss_config: have weights of diff losses.

    Returns:
        - calculated loss.
        - metrics.
    """
    # one-hot encode the integer label map, (batch, ..., num_classes)
    one_hot_label = jax.nn.one_hot(
        input_dict[LABEL], num_classes=num_classes, axis=-1
    )
    # add a trailing channel axis, (batch, ..., 1)
    image_with_ch = jnp.expand_dims(input_dict[IMAGE], axis=-1)
    # forward pass in training mode, (batch, ..., num_classes)
    pred_logits = model(image=image_with_ch, is_train=True)
    return segmentation_loss_with_aux(
        logits=pred_logits,
        mask_true=one_hot_label,
        loss_config=loss_config,
    )
def diffusion_loss(  # pylint:disable=R0915
    input_dict: Dict[str, jnp.ndarray],
    num_classes: int,
    gd: GaussianDiffusion,
    loss_config: DictConfig,
    recycle: bool,
) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
    """Calculate diffusion loss and return metrics.

    In diffusion, the noise is defined on segmentation mask.
    That is, x_t is segmentation logits.

    Args:
        input_dict: input data having image, label, and time_step.
            image: (batch, ...)
            label: (batch, ..., num_classes)
            time_step: (batch, )
        num_classes: number of classes including background.
        gd: model for sampling.
        loss_config: have weights of diff losses.
        recycle: recycle model prediction or not.

    Returns:
        - calculated loss.
        - metrics.
    """
    scalars = {}
    # (batch, ..., 1)
    image = jnp.expand_dims(input_dict[IMAGE], axis=-1)
    # (batch, ..., num_classes)
    mask_true = jax.nn.one_hot(
        input_dict[LABEL],
        num_classes=num_classes,
        axis=-1,
        dtype=image.dtype,
    )
    # map one-hot {0, 1} masks to {-1, 1} so the diffusion target matches
    # the scale of the standard normal noise
    x_start = mask_true * 2 - 1
    # (batch, )
    t = gd.sample_timestep(batch_size=image.shape[0])
    if recycle:
        # recycling: run the model once at t+1 first and reuse its
        # (stop-gradient) x_start prediction as the diffusion target below
        # (batch, ..., num_classes)
        noise_recyle = gd.noise_sample(shape=x_start.shape, dtype=x_start.dtype)
        t_recycle = jnp.minimum(t + 1, gd.num_timesteps - 1)
        x_t_recycle = gd.q_sample(
            x_start=x_start, noise=noise_recyle, t=t_recycle
        )
        # (batch, ..., ch_input + num_classes)
        model_in_recycle = jnp.concatenate([image, x_t_recycle], axis=-1)
        # (batch, ..., num_classes) or (batch, ..., 2*num_classes)
        # model outputs are always logits
        model_out_recycle = gd.model(model_in_recycle, t_recycle, is_train=True)
        x_start_recycle, _, _ = gd.p_mean_variance(
            model_out=model_out_recycle,
            x_t=x_t_recycle,
            t=t_recycle,
        )
        # no gradients flow through the recycled prediction
        x_start = jax.lax.stop_gradient(x_start_recycle)
    # (batch, ..., num_classes)
    noise = gd.noise_sample(shape=x_start.shape, dtype=x_start.dtype)
    x_t = gd.q_sample(x_start=x_start, noise=noise, t=t)
    # (batch, ..., ch_input + num_classes)
    model_in = jnp.concatenate([image, x_t], axis=-1)
    # (batch, ..., num_classes) or (batch, ..., 2*num_classes)
    # model outputs are always logits
    model_out = gd.model(model_in, t, is_train=True)
    model_out_vlb = jax.lax.stop_gradient(model_out)
    if gd.model_var_type in [
        DiffusionModelVarianceType.LEARNED,
        DiffusionModelVarianceType.LEARNED_RANGE,
    ]:
        # learned variance doubles the channels: split mean / log-variance
        # model_out (batch, ..., num_classes)
        model_out, log_variance = jnp.split(
            model_out, indices_or_sections=2, axis=-1
        )
        # apply a stop-gradient to the mean output for the vlb to prevent
        # this loss change mean prediction
        model_out_vlb = jax.lax.stop_gradient(model_out)
        # model_out (batch, ..., num_classes*2)
        model_out_vlb = jnp.concatenate([model_out_vlb, log_variance], axis=-1)
        vlb_scalar = gd.variational_lower_bound(
            model_out=model_out_vlb,
            x_start=x_start,
            x_t=x_t,
            t=t,
        )
        vlb_scalar = jnp.nanmean(vlb_scalar)
        scalars["vlb_loss"] = vlb_scalar
    if gd.model_out_type == DiffusionModelOutputType.EPSILON:
        # model predicts the noise: MSE on noise, plus segmentation losses
        # on the x_start implied by that noise prediction
        mse_loss_scalar = jnp.mean((model_out - noise) ** 2)
        scalars["mse_loss"] = mse_loss_scalar
        x_start = gd.predict_xstart_from_epsilon_xt(
            x_t=x_t, epsilon=model_out, t=t
        )
        logits = gd.x_to_logits(x_start)
        seg_loss_scalar, seg_scalars = segmentation_loss_with_aux(
            logits=logits,
            mask_true=mask_true,
            loss_config=loss_config,
        )
        scalars = {**scalars, **seg_scalars}
        loss_scalar = loss_config["mse"] * mse_loss_scalar + seg_loss_scalar
    elif gd.model_out_type == DiffusionModelOutputType.X_START:
        # model predicts x_start directly; treat it as segmentation logits
        logits = model_out
        loss_scalar, seg_scalars = segmentation_loss_with_aux(
            logits=logits,
            mask_true=mask_true,
            loss_config=loss_config,
        )
        scalars = {**scalars, **seg_scalars}
    else:
        raise ValueError(
            f"Unknown DiffusionModelOutputType {gd.model_out_type}."
        )
    if gd.model_var_type in [
        DiffusionModelVarianceType.LEARNED,
        DiffusionModelVarianceType.LEARNED_RANGE,
    ]:
        # TODO nan values may happen
        loss_scalar += vlb_scalar * gd.num_timesteps / gd.num_timesteps_beta
    scalars["total_loss"] = loss_scalar
    # timestep statistics help monitor the sampler during training
    scalars["mean_t"] = jnp.mean(t)
    scalars["max_t"] = jnp.max(t)
    scalars["min_t"] = jnp.min(t)
    return loss_scalar, scalars
def build_loss_fn(
    config: DictConfig,
) -> Callable[
    [Dict[str, jnp.ndarray]], Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]
]:
    """Build the task-specific loss function from the experiment config.

    Args:
        config: entire config.

    Returns:
        Loss function.

    Raises:
        ValueError: if config is wrong or not supported.
    """
    data_config = config.data
    task_config = config.task
    model_config = config.model
    loss_config = config.loss
    mp_config = config.training.mixed_precision
    set_mixed_precision_policy(
        use_mp=mp_config.use, model_name=model_config.name
    )
    # number of classes including background
    num_classes = NUM_CLASSES_MAP[data_config["name"]]
    task_name = task_config["name"]
    if task_name not in ("segmentation", "diffusion"):
        raise ValueError(f"Unknown task {task_config['name']}.")

    def loss_fn(
        input_dict: Dict[str, jnp.ndarray]
    ) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
        # networks must be instantiated inside the (haiku-transformed) call
        net = build_vision_model(
            data_config=data_config,
            task_config=task_config,
            model_config=model_config,
        )
        if task_name == "segmentation":
            return segmentation_loss(
                input_dict=input_dict,
                model=net,
                num_classes=num_classes,
                loss_config=loss_config,
            )
        # diffusion task: wrap the network in a GaussianDiffusion sampler
        gd = build_diffusion_model(
            model=net,
            diffusion_config=task_config["diffusion"],
        )
        return diffusion_loss(
            input_dict=input_dict,
            gd=gd,
            num_classes=num_classes,
            loss_config=loss_config,
            recycle=task_config["diffusion"]["recycle"],
        )

    return loss_fn
| 11,245 | 30.858357 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/model.py | """Module for building models."""
import haiku as hk
from omegaconf import DictConfig
from imgx.datasets import IMAGE_SHAPE_MAP, NUM_CLASSES_MAP
from imgx.diffusion.gaussian_diffusion import (
DiffusionBetaSchedule,
DiffusionModelOutputType,
DiffusionModelVarianceType,
DiffusionSpace,
GaussianDiffusion,
)
from imgx.model import Unet3d, Unet3dSlice, Unet3dSliceTime, Unet3dTime
def build_vision_model(
    data_config: DictConfig,
    task_config: DictConfig,
    model_config: DictConfig,
) -> hk.Module:
    """Build model from config.

    Args:
        data_config: have in_shape and out_channels.
        task_config: have task name and configs.
        model_config: have model name attribute.

    Returns:
        Model.

    Raises:
        ValueError: if config is wrong or not supported.
    """
    if model_config.name not in model_config:
        raise ValueError(f"Missing configuration for {model_config.name}.")
    dataset_name = data_config["name"]
    image_shape = IMAGE_SHAPE_MAP[dataset_name]
    num_classes = NUM_CLASSES_MAP[dataset_name]
    if task_config.name == "segmentation":
        # TODO use enum
        in_channels = 1  # forward will expand dimension
        out_channels = num_classes
    elif task_config.name == "diffusion":
        # diffusion model takes the image and a noised mask/logits as input
        in_channels = 1 + num_classes
        # diffusion model may output variance per class
        out_channels = num_classes
        model_var_type = DiffusionModelVarianceType[
            task_config["diffusion"]["model_var_type"].upper()
        ]
        if model_var_type in [
            DiffusionModelVarianceType.LEARNED,
            DiffusionModelVarianceType.LEARNED_RANGE,
        ]:
            # one extra head of log-variance per class doubles the channels
            out_channels *= 2
    else:
        raise ValueError(f"Unknown task {task_config.name}.")
    # merge the common arguments with the architecture-specific subsection
    total_config = {
        "remat": model_config.remat,
        "in_shape": image_shape,
        "in_channels": in_channels,
        "out_channels": out_channels,
        **model_config[model_config.name],
    }
    if model_config.name == "unet3d":
        return Unet3d(**total_config)
    if model_config.name == "unet3d_slice":
        return Unet3dSlice(**total_config)
    # time-conditioned variants additionally need the diffusion step count
    if model_config.name == "unet3d_time":
        num_timesteps = task_config["diffusion"]["num_timesteps"]
        return Unet3dTime(num_timesteps=num_timesteps, **total_config)
    if model_config.name == "unet3d_slice_time":
        num_timesteps = task_config["diffusion"]["num_timesteps"]
        return Unet3dSliceTime(num_timesteps=num_timesteps, **total_config)
    raise ValueError(f"Unknown model {model_config.name}.")
def build_diffusion_model(
    model: hk.Module,
    diffusion_config: DictConfig,
) -> GaussianDiffusion:
    """Build diffusion model from config and vision model.

    Args:
        model: the model used in diffusion.
        diffusion_config: config for diffusion setting.

    Returns:
        A GaussianDiffusion model.
    """
    num_timesteps = diffusion_config["num_timesteps"]
    num_timesteps_beta = diffusion_config["num_timesteps_beta"]
    # copy so parsing the schedule name does not mutate the shared config
    beta_config = diffusion_config["beta"].copy()
    # config stores enum values as lowercase strings; parse them here
    beta_config["beta_schedule"] = DiffusionBetaSchedule[
        beta_config["beta_schedule"].upper()
    ]
    model_out_type = DiffusionModelOutputType[
        diffusion_config["model_out_type"].upper()
    ]
    model_var_type = DiffusionModelVarianceType[
        diffusion_config["model_var_type"].upper()
    ]
    x_space = DiffusionSpace[diffusion_config["x_space"].upper()]
    x_limit = diffusion_config["x_limit"]
    use_ddim = diffusion_config["use_ddim"]
    return GaussianDiffusion(
        model=model,
        num_timesteps=num_timesteps,
        num_timesteps_beta=num_timesteps_beta,
        model_out_type=model_out_type,
        model_var_type=model_var_type,
        x_space=x_space,
        x_limit=x_limit,
        use_ddim=use_ddim,
        **beta_config,
    )
| 3,969 | 32.644068 | 75 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/mixed_precision.py | """Mixed precision related functions."""
from functools import partial
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jmp
from imgx import model
from imgx.model import CONFIG_NAME_TO_MODEL_CLS_NAME
def get_mixed_precision_policy(use_mp: bool) -> jmp.Policy:
    """Return general mixed precision policy.

    Args:
        use_mp: use mixed precision if True.

    Returns:
        Policy instance.
    """
    # params and outputs always stay in float32; only the compute dtype is
    # lowered to half precision when mixed precision is enabled
    compute_dtype = jmp.half_dtype() if use_mp else jnp.float32
    return jmp.Policy(
        compute_dtype=compute_dtype,
        param_dtype=jnp.float32,
        output_dtype=jnp.float32,
    )
def get_mixed_precision_policy_for_normalization(use_mp: bool) -> jmp.Policy:
    """Return mixed precision policy for norms.

    Args:
        use_mp: use mixed precision if True.

    Returns:
        Policy instance.
    """
    # norm layers always compute and store params in float32 for stability;
    # only their outputs are cast back to half precision under mixed precision
    output_dtype = jmp.half_dtype() if use_mp else jnp.float32
    return jmp.Policy(
        compute_dtype=jnp.float32,
        param_dtype=jnp.float32,
        output_dtype=output_dtype,
    )
def select_tree(
    pred: jnp.ndarray, a: chex.ArrayTree, b: chex.ArrayTree
) -> chex.ArrayTree:
    """Selects a pytree based on the given predicate.

    Replace jmp.select_tree as it used jax.tree_multimap
    which has been deprecated.

    Args:
        pred: boolean scalar array.
        a: tree whose leaves are returned when pred is True.
        b: tree whose leaves are returned when pred is False.

    Returns:
        Selected tree.

    Raises:
        ValueError: if pred dtype or shape is wrong.
    """
    if not (pred.ndim == 0 and pred.dtype == jnp.bool_):
        raise ValueError("expected boolean scalar")
    # bind the shared predicate so jax.lax.select is applied leaf-wise
    return jax.tree_map(partial(jax.lax.select, pred), a, b)
def set_mixed_precision_policy(use_mp: bool, model_name: str) -> None:
    """Set mixed precision policy for networks.

    Args:
        use_mp: use mixed precision if True.
        model_name: name of the model.

    Raises:
        ValueError: if the model name is unknown.
    """
    # assign mixed precision policies to modules
    # for norms, use the full precision for stability
    mp_policy = get_mixed_precision_policy(use_mp)
    mp_norm_policy = get_mixed_precision_policy_for_normalization(use_mp)
    # the order we call `set_policy` doesn't matter, when a method on a
    # class is called the policy for that class will be applied, or it will
    # inherit the policy from its parent module.
    hk.mixed_precision.set_policy(hk.BatchNorm, mp_norm_policy)
    hk.mixed_precision.set_policy(hk.GroupNorm, mp_norm_policy)
    hk.mixed_precision.set_policy(hk.LayerNorm, mp_norm_policy)
    hk.mixed_precision.set_policy(hk.InstanceNorm, mp_norm_policy)
    if model_name not in CONFIG_NAME_TO_MODEL_CLS_NAME:
        raise ValueError(f"Unknown model name {model_name}.")
    # resolve the config name to the model class and apply the general policy
    model_cls_name = CONFIG_NAME_TO_MODEL_CLS_NAME[model_name]
    hk.mixed_precision.set_policy(getattr(model, model_cls_name), mp_policy)
| 2,956 | 29.173469 | 78 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/eval.py | """Module for building evaluation functions."""
import json
from functools import partial
from pathlib import Path
from typing import Callable, Dict, Iterable, Optional, Tuple
import chex
import haiku as hk
import jax
import numpy as np
import pandas as pd
from jax import numpy as jnp
from omegaconf import DictConfig
from imgx import IMAGE, LABEL, UID
from imgx.datasets import (
DIR_TFDS_PROCESSED_MAP,
IMAGE_SPACING_MAP,
NUM_CLASSES_MAP,
)
from imgx.datasets.preprocess import save_segmentation_prediction
from imgx.datasets.util import unpad
from imgx.device import unshard
from imgx.diffusion.gaussian_diffusion import GaussianDiffusion
from imgx.exp.mixed_precision import set_mixed_precision_policy
from imgx.exp.model import build_diffusion_model, build_vision_model
from imgx.math_util import logits_to_mask
from imgx.metric import (
aggregated_surface_distance,
centroid_distance,
class_proportion,
dice_score,
iou,
normalized_surface_dice_from_distances,
)
from imgx.metric.centroid import get_coordinate_grid
def get_jit_segmentation_metrics(
    mask_pred: jnp.ndarray, mask_true: jnp.ndarray, spacing: jnp.ndarray
) -> Dict[str, jnp.ndarray]:
    """Calculate jittable segmentation metrics.

    Use nanmean in case some classes do not exist.

    Args:
        mask_pred: shape = (batch, ..., num_classes).
        mask_true: shape = (batch, ..., num_classes).
        spacing: spacing of pixel/voxels along each dimension, (3,).

    Returns:
        Dict of metrics, each value is of shape (batch,).
    """
    chex.assert_equal_shape([mask_pred, mask_true])
    scalars = {}
    # binary dice (batch, num_classes)
    dice_score_bc = dice_score(
        mask_pred=mask_pred,
        mask_true=mask_true,
    )
    for i in range(dice_score_bc.shape[-1]):
        scalars[f"binary_dice_score_class_{i}"] = dice_score_bc[:, i]
    scalars["mean_binary_dice_score"] = jnp.nanmean(dice_score_bc, axis=1)
    # class 0 is assumed to be background, hence dropped in this variant
    scalars["mean_binary_dice_score_without_background"] = jnp.nanmean(
        dice_score_bc[:, 1:], axis=1
    )
    # IoU (batch, num_classes)
    iou_bc = iou(
        mask_pred=mask_pred,
        mask_true=mask_true,
    )
    for i in range(iou_bc.shape[-1]):
        scalars[f"iou_class_{i}"] = iou_bc[:, i]
    scalars["mean_iou"] = jnp.nanmean(iou_bc, axis=1)
    scalars["mean_iou_without_background"] = jnp.nanmean(iou_bc[:, 1:], axis=1)
    # centroid distance (batch, num_classes)
    grid = get_coordinate_grid(shape=mask_pred.shape[1:-1])
    centroid_dist_bc = centroid_distance(
        mask_pred=mask_pred,
        mask_true=mask_true,
        grid=grid,
        spacing=spacing,
    )
    for i in range(centroid_dist_bc.shape[-1]):
        scalars[f"centroid_dist_class_{i}"] = centroid_dist_bc[:, i]
    scalars["mean_centroid_dist"] = jnp.nanmean(centroid_dist_bc, axis=1)
    scalars["mean_centroid_dist_without_background"] = jnp.nanmean(
        centroid_dist_bc[:, 1:], axis=1
    )
    # class proportion (batch, num_classes), reported for both masks
    for mask, mask_name in zip([mask_pred, mask_true], ["pred", "label"]):
        class_prop_bc = class_proportion(mask)
        for i in range(class_prop_bc.shape[-1]):
            scalars[f"class_{i}_proportion_{mask_name}"] = class_prop_bc[:, i]
    return scalars
def get_non_jit_segmentation_metrics(
    mask_pred: jnp.ndarray,
    mask_true: jnp.ndarray,
    spacing: Optional[jnp.ndarray],
) -> Dict[str, jnp.ndarray]:
    """Calculate non-jittable segmentation metrics for batch.

    Use nanmean in case some classes do not exist.

    Args:
        mask_pred: (batch, w, h, d, num_classes)
        mask_true: (batch, w, h, d, num_classes)
        spacing: spacing of pixel/voxels along each dimension.

    Returns:
        Dict of metrics, each value is of shape (batch,).
    """
    chex.assert_equal_shape([mask_pred, mask_true])
    batch_scalars = {}
    # (3, batch, num_classes) — one leading row per aggregation below:
    # [0] mean surface distance
    # [1] hausdorff distance, 95 percentile
    # [2] normalised surface dice
    sur_dist_bc = aggregated_surface_distance(
        mask_pred=np.array(mask_pred),
        mask_true=np.array(mask_true),
        agg_fns=[
            np.mean,
            partial(np.percentile, q=95),
            normalized_surface_dice_from_distances,
        ],
        # number of arguments each aggregation function consumes
        num_args=[1, 1, 2],
        spacing=spacing,
    )
    for i in range(sur_dist_bc.shape[-1]):
        batch_scalars[f"mean_surface_dist_class_{i}"] = sur_dist_bc[0, :, i]
        batch_scalars[f"hausdorff_dist_class_{i}"] = sur_dist_bc[1, :, i]
        batch_scalars[f"surface_dice_class_{i}"] = sur_dist_bc[2, :, i]
    batch_scalars["mean_mean_surface_dist"] = np.nanmean(
        sur_dist_bc[0, ...], axis=-1
    )
    batch_scalars["mean_hausdorff_dist"] = np.nanmean(
        sur_dist_bc[1, ...], axis=-1
    )
    batch_scalars["mean_surface_dice"] = np.nanmean(
        sur_dist_bc[2, ...], axis=-1
    )
    # class 0 is assumed to be background, dropped in these variants
    batch_scalars["mean_mean_surface_dist_without_background"] = np.nanmean(
        sur_dist_bc[0, :, 1:], axis=-1
    )
    batch_scalars["mean_hausdorff_dist_without_background"] = np.nanmean(
        sur_dist_bc[1, :, 1:], axis=-1
    )
    batch_scalars["mean_surface_dice_without_background"] = np.nanmean(
        sur_dist_bc[2, :, 1:], axis=-1
    )
    return batch_scalars
def batch_segmentation_evaluation(
    input_dict: Dict[str, jnp.ndarray],
    model: hk.Module,
    spacing: jnp.ndarray,
    num_classes: int,
) -> Tuple[Dict[str, jnp.ndarray], jnp.ndarray]:
    """Evaluate segmentation predictions for one batch.

    Args:
        input_dict: input data having image and label.
        model: network instance.
        spacing: spacing of pixel/voxels along each dimension.
        num_classes: number of classes including background.

    Returns:
        - metrics, each metric value has shape (batch, ).
        - logits.
    """
    # (batch, ..., 1)
    image = jnp.expand_dims(input_dict[IMAGE], axis=-1)
    # (batch, ..., num_classes); is_train=False for inference behaviour
    logits = model(image=image, is_train=False)
    # (batch, ..., num_classes)
    mask_true = jax.nn.one_hot(
        input_dict[LABEL], num_classes=num_classes, axis=-1
    )
    # (batch, ..., num_classes) binarised prediction
    mask_pred = logits_to_mask(logits, axis=-1)
    # evaluate with the jittable metrics only
    scalars = get_jit_segmentation_metrics(
        mask_pred=mask_pred, mask_true=mask_true, spacing=spacing
    )
    return scalars, logits
def batch_diffusion_evaluation(
    input_dict: Dict[str, jnp.ndarray],
    spacing: jnp.ndarray,
    num_classes: int,
    diffusion_model: GaussianDiffusion,
) -> Tuple[Dict[str, jnp.ndarray], jnp.ndarray]:
    """Evaluate predictions from diffusion model.

    Args:
        input_dict: input data having image and label.
        spacing: spacing of pixel/voxels along each dimension.
        num_classes: number of classes including background.
        diffusion_model: model for sampling.

    Returns:
        - metrics for all time steps, each metric value has shape (batch, ).
        - logits for all time steps.
    """
    # (batch, ..., 1)
    image = jnp.expand_dims(input_dict[IMAGE], axis=-1)
    # (batch, ..., num_classes)
    mask_true = jax.nn.one_hot(
        input_dict[LABEL], num_classes=num_classes, axis=-1
    )
    # (batch, ..., num_classes) pure-noise starting point for sampling
    x_t = diffusion_model.noise_sample(shape=mask_true.shape, dtype=image.dtype)
    # (batch, ..., num_classes, num_timesteps_sample)
    # one x_start prediction per sampling step, stacked along a new last axis
    x_start = jnp.stack(
        list(diffusion_model.sample_mask_progressive(image=image, x_t=x_t)),
        axis=-1,
    )
    # evaluate
    # (batch, ..., num_classes, num_timesteps_sample)
    mask_pred = logits_to_mask(x_start, axis=-2)
    # vmap over the trailing timestep axis so metrics are per sampling step
    scalars = jax.vmap(
        partial(
            get_jit_segmentation_metrics,
            mask_true=mask_true,
            spacing=spacing,
        ),
        in_axes=-1,
        out_axes=-1,
    )(mask_pred)
    return scalars, x_start
def build_batch_eval_fn(
    config: DictConfig,
) -> Callable:
    """Build the per-batch evaluation function from config.

    Args:
        config: entire config.

    Returns:
        Evaluate function.

    Raises:
        ValueError: if config is wrong or not supported.
    """
    if not hasattr(config.model, "name"):
        # error message previously read "Config does have model name."
        raise ValueError("Config does not have model name.")
    set_mixed_precision_policy(
        use_mp=config.training.mixed_precision.use, model_name=config.model.name
    )
    # image spacing
    data_config = config.data
    dataset_name = data_config.name
    spacing = jnp.array(IMAGE_SPACING_MAP[dataset_name])
    num_classes = NUM_CLASSES_MAP[dataset_name]
    task_config = config.task
    vision_model = build_vision_model(
        data_config=data_config,
        task_config=task_config,
        model_config=config.model,
    )
    if task_config["name"] == "segmentation":
        return partial(
            batch_segmentation_evaluation,
            model=vision_model,
            spacing=spacing,
            num_classes=num_classes,
        )
    if task_config["name"] == "diffusion":
        diffusion_model = build_diffusion_model(
            model=vision_model,
            diffusion_config=task_config["diffusion"],
        )
        return partial(
            batch_diffusion_evaluation,
            spacing=spacing,
            num_classes=num_classes,
            diffusion_model=diffusion_model,
        )
    raise ValueError(f"Unknown task {task_config['name']}.")
def get_non_jit_segmentation_metrics_per_step(
    mask_pred: jnp.ndarray,
    mask_true: jnp.ndarray,
    spacing: Optional[jnp.ndarray],
) -> Dict[str, jnp.ndarray]:
    """Calculate non-jittable segmentation metrics per sampling step.

    Cannot use VMAP as it requires jittable functions, so the trailing
    step axis is iterated in plain Python.

    Args:
        mask_pred: (batch, w, h, d, num_classes, num_steps)
        mask_true: (batch, w, h, d, num_classes)
        spacing: spacing of pixel/voxels along each dimension.

    Returns:
        Metrics dict, each value stacks the per-step results on the last axis.
    """
    if mask_pred.ndim != 6:
        raise ValueError(
            "mask_pred should have shape "
            "(batch, w, h, d, num_classes, num_timesteps_sample) ,"
            f"got {mask_pred.shape}."
        )
    # one metrics dict per sampling step
    per_step_scalars = [
        get_non_jit_segmentation_metrics(
            mask_pred=mask_pred[..., step],
            mask_true=mask_true,
            spacing=spacing,
        )
        for step in range(mask_pred.shape[-1])
    ]
    # re-assemble: stack each metric across steps on a trailing axis
    return {
        key: np.stack([d[key] for d in per_step_scalars], axis=-1)
        for key in per_step_scalars[0]
    }
def dataset_segmentation_evaluation(  # pylint:disable=R0912,R0915
    evaluate_pmap: Callable,
    params: hk.Params,
    state: hk.State,
    rng: jnp.ndarray,
    batch_iterator: Iterable[Dict[str, chex.ArrayTree]],
    num_steps: int,
    is_diffusion: bool,
    spacing: Optional[jnp.ndarray],
    out_dir: Optional[Path],
    tfds_dir: Path,
    save_predictions: bool,
) -> Dict:
    """Get predictions and perform evaluations of a data set.

    Args:
        evaluate_pmap: forward function to call.
        params: model parameters.
        state: model state, EMA or not.
        rng: random key.
        batch_iterator: iterator of a data set.
        num_steps: number of steps.
        is_diffusion: the method is diffusion or not.
        spacing: spacing of pixel/voxels along each dimension.
        out_dir: output directory for metrics and predictions,
            if None, no files will be saved.
        tfds_dir: directory saving preprocessed images and labels.
        save_predictions: if True, save predicted masks.

    Returns:
        Dict of metrics averaged over the data set, including
        "num_images_in_total".
    """
    lst_df_scalar = []
    for _ in range(num_steps):
        batch = next(batch_iterator)  # type: ignore[call-overload]
        # get UID and parse to string
        uids = batch.pop(UID)
        uids = uids.reshape(-1)  # remove shard axis
        uids = [
            x.decode("utf-8") if isinstance(x, bytes) else x
            for x in uids.tolist()
        ]
        # non diffusion
        #   logits (num_shards, batch, w, h, d, num_classes)
        #   metrics (num_shards, batch)
        # diffusion
        #   logits (num_shards, batch, w, h, d, num_classes, num_timesteps)
        #   metrics (num_shards, batch, num_timesteps)
        # arrays are across all devices
        batch = jax.lax.stop_gradient(batch)
        (scalars, logits), _ = evaluate_pmap(params, state, rng, batch)
        label = batch[LABEL]
        # put on cpu to limit device memory usage during accumulation
        device_cpu = jax.devices("cpu")[0]
        scalars = jax.device_put(scalars, device_cpu)
        logits = jax.device_put(logits, device_cpu)
        label = jax.device_put(label, device_cpu)
        # remove shard axis
        # array are on device 0
        scalars = unshard(scalars)
        logits = unshard(logits)
        label = unshard(label)
        # remove padded examples; incomplete batches are padded with uid 0
        if 0 in uids:
            # the batch was not complete, padded with zero
            num_samples = uids.index(0)
            uids = uids[:num_samples]
            scalars = unpad(scalars, num_samples)
            logits = unpad(logits, num_samples)
            label = unpad(label, num_samples)
        # add the non-jittable metrics (surface-distance based)
        if is_diffusion:
            # class axis is second-to-last, steps on the last axis
            num_classes = logits.shape[-2]
            mask_true = jax.nn.one_hot(label, num_classes=num_classes, axis=-1)
            mask_pred = logits_to_mask(logits, axis=-2)
            scalars_non_jit = get_non_jit_segmentation_metrics_per_step(
                mask_pred=mask_pred,
                mask_true=mask_true,
                spacing=spacing,
            )
        else:
            num_classes = logits.shape[-1]
            mask_true = jax.nn.one_hot(label, num_classes=num_classes, axis=-1)
            scalars_non_jit = get_non_jit_segmentation_metrics(
                mask_pred=logits_to_mask(logits, axis=-1),
                mask_true=mask_true,
                spacing=spacing,
            )
        scalars = {**scalars, **scalars_non_jit}
        # for diffusion separate metrics per step;
        # the bare key keeps the last step's value
        if is_diffusion:
            scalars_flatten = {}
            for k, v in scalars.items():
                for i in range(v.shape[-1]):
                    scalars_flatten[f"{k}_step_{i}"] = v[..., i]
                scalars_flatten[k] = v[..., -1]
            scalars = scalars_flatten
        # save output
        if save_predictions and (out_dir is not None):
            if is_diffusion:
                # one sub-directory per sampling step
                for i in range(logits.shape[-1]):
                    mask_pred = np.array(
                        jnp.argmax(logits[..., i], axis=-1), dtype=int
                    )
                    save_segmentation_prediction(
                        preds=mask_pred,
                        uids=uids,
                        out_dir=out_dir / f"step_{i}",
                        tfds_dir=tfds_dir,
                    )
            else:
                mask_pred = np.array(jnp.argmax(logits, axis=-1), dtype=int)
                save_segmentation_prediction(
                    preds=mask_pred,
                    uids=uids,
                    out_dir=out_dir,
                    tfds_dir=tfds_dir,
                )
        # save metrics; convert arrays to lists for DataFrame construction
        scalars = jax.tree_map(lambda x: np.asarray(x).tolist(), scalars)
        scalars["uid"] = uids
        lst_df_scalar.append(pd.DataFrame(scalars))
    # assemble metrics across batches
    df_scalar = pd.concat(lst_df_scalar)
    df_scalar = df_scalar.sort_values("uid")
    if out_dir is not None:
        df_scalar.to_csv(out_dir / "metrics_per_sample.csv", index=False)
    # average over samples in the dataset
    scalars = df_scalar.drop("uid", axis=1).mean().to_dict()
    scalars["num_images_in_total"] = len(df_scalar)
    if out_dir is not None:
        with open(out_dir / "mean_metrics.json", "w", encoding="utf-8") as f:
            json.dump(scalars, f, sort_keys=True, indent=4)
    return scalars
def build_dataset_eval_fn(config: DictConfig) -> Callable:
    """Return a function to evaluate a whole data set.

    Args:
        config: entire config.

    Returns:
        A function.

    Raises:
        ValueError: if data set in unknown.
    """
    dataset_name = config.data.name
    # bind the config-derived constants; remaining arguments
    # (params, state, rng, iterator, ...) are supplied per call
    return partial(
        dataset_segmentation_evaluation,
        is_diffusion=config["task"]["name"] == "diffusion",
        spacing=jnp.array(IMAGE_SPACING_MAP[dataset_name]),
        tfds_dir=DIR_TFDS_PROCESSED_MAP[dataset_name],
    )
| 16,503 | 31.746032 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/__init__.py | """Module to manage experiments."""
from imgx.exp.experiment import Experiment
__all__ = ["Experiment"]
| 105 | 20.2 | 42 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_dataset_util.py | """Tests for image utils of datasets."""
from typing import Tuple
import chex
import numpy as np
import pytest
from chex._src import fake
from imgx.datasets.util import (
get_center_crop_shape,
get_center_crop_shape_from_bbox,
get_center_pad_shape,
get_foreground_range,
try_to_get_center_crop_shape,
)
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex expose 2 CPU devices so pmap/sharding code paths are
    exercised in tests without real accelerators.
    """
    fake.set_n_cpu_devices(2)
@pytest.mark.parametrize(
    ("current_shape", "target_shape", "expected_lower", "expected_upper"),
    [
        (
            (64, 44, 40),
            (64, 44, 40),
            (0, 0, 0),
            (0, 0, 0),
        ),
        (
            (64, 44, 40),
            (40, 44, 30),
            (0, 0, 0),
            (0, 0, 0),
        ),
        (
            (64, 44, 40),
            (64, 64, 40),
            (0, 10, 0),
            (0, 10, 0),
        ),
        # odd difference: the extra voxel goes to the upper side
        (
            (63, 43, 39),
            (64, 64, 40),
            (0, 10, 0),
            (1, 11, 1),
        ),
        (
            (44, 40),
            (64, 40),
            (10, 0),
            (10, 0),
        ),
        (
            (43, 39),
            (64, 40),
            (10, 0),
            (11, 1),
        ),
    ],
    ids=[
        "3d_same",
        "3d_no_pad",
        "3d_even",
        "3d_odd",
        "2d_even",
        "2d_odd",
    ],
)
def test_get_center_pad_shape(
    current_shape: Tuple[int, ...],
    target_shape: Tuple[int, ...],
    expected_lower: Tuple[int, ...],
    expected_upper: Tuple[int, ...],
) -> None:
    """Test get_center_pad_shape returns per-side pad amounts.

    Targets smaller than the current shape result in zero padding.

    Args:
        current_shape: current shape of the image.
        target_shape: target shape of the image.
        expected_lower: shape to pad on the lower side.
        expected_upper: shape to pad on the upper side.
    """
    got_lower, got_upper = get_center_pad_shape(current_shape, target_shape)
    assert got_lower == expected_lower
    assert got_upper == expected_upper
@pytest.mark.parametrize(
    ("current_shape", "target_shape", "expected_lower", "expected_upper"),
    [
        (
            (64, 44, 40),
            (64, 44, 40),
            (0, 0, 0),
            (0, 0, 0),
        ),
        (
            (64, 44, 40),
            (64, 64, 40),
            (0, 0, 0),
            (0, 0, 0),
        ),
        (
            (64, 44, 40),
            (40, 44, 30),
            (12, 0, 5),
            (12, 0, 5),
        ),
        # odd difference: the extra voxel is cropped from the upper side
        (
            (65, 45, 41),
            (40, 44, 30),
            (12, 0, 5),
            (13, 1, 6),
        ),
        (
            (64, 40),
            (40, 30),
            (12, 5),
            (12, 5),
        ),
        (
            (65, 41),
            (40, 30),
            (12, 5),
            (13, 6),
        ),
    ],
    ids=[
        "3d_same",
        "3d_no_crop",
        "3d_even",
        "3d_odd",
        "2d_even",
        "2d_odd",
    ],
)
def test_get_center_crop_shape(
    current_shape: Tuple[int, ...],
    target_shape: Tuple[int, ...],
    expected_lower: Tuple[int, ...],
    expected_upper: Tuple[int, ...],
) -> None:
    """Test get_center_crop_shape returns per-side crop amounts.

    Targets larger than the current shape result in zero cropping.

    Args:
        current_shape: current shape of the image.
        target_shape: target shape of the image.
        expected_lower: shape to crop on the lower side.
        expected_upper: shape to crop on the upper side.
    """
    got_lower, got_upper = get_center_crop_shape(current_shape, target_shape)
    assert got_lower == expected_lower
    assert got_upper == expected_upper
class TestTryCenterCrop:
"""Test try_to_get_center_crop_shape values and errors."""
    @pytest.mark.parametrize(
        (
            "label_min",
            "label_max",
            "current_length",
            "target_length",
            "expected_lower",
            "expected_upper",
        ),
        [
            (0, 5, 6, 6, 0, 0),
            (0, 5, 6, 7, 0, 0),
            (0, 5, 6, 4, 0, 2),
            (1, 5, 6, 4, 1, 1),
            (2, 6, 6, 4, 2, 0),
            # crop window shifts so the labelled range is not cut off
            (0, 3, 7, 4, 0, 3),
            (5, 7, 7, 4, 3, 0),
        ],
        ids=[
            "no_crop_same_length",
            "no_crop_too_short",
            "center_crop_no_shift_no_left_crop",
            "center_crop_no_shift_both_sides_crop",
            "center_crop_no_shift_no_right_crop",
            "shift_right",
            "shift_left",
        ],
    )
    def test_try_to_get_center_crop_shape(
        self,
        label_min: int,
        label_max: int,
        current_length: int,
        target_length: int,
        expected_lower: int,
        expected_upper: int,
    ) -> None:
        """Test try_to_get_center_crop_shape along one axis.

        Args:
            label_min: label index minimum, inclusive.
            label_max: label index maximum, exclusive.
            current_length: current image length.
            target_length: target image length.
            expected_lower: shape to crop on the lower side.
            expected_upper: shape to crop on the upper side.
        """
        got_lower, got_upper = try_to_get_center_crop_shape(
            label_min=label_min,
            label_max=label_max,
            current_length=current_length,
            target_length=target_length,
        )
        assert got_lower == expected_lower
        assert got_upper == expected_upper
@pytest.mark.parametrize(
("label_min", "label_max", "current_length", "target_length"),
[
(-1, 5, 6, 6),
(0, 7, 6, 7),
],
ids=[
"min_negative",
"max_too_large",
],
)
def test_get_upsample_padding_configs_error(
self,
label_min: int,
label_max: int,
current_length: int,
target_length: int,
) -> None:
"""Test try_to_get_center_crop_shape raising errors..
Args:
label_min: label index minimum, inclusive.
label_max: label index maximum, exclusive.
current_length: current image length.
target_length: target image length.
"""
with pytest.raises(ValueError) as err: # noqa: PT011
try_to_get_center_crop_shape(
label_min=label_min,
label_max=label_max,
current_length=current_length,
target_length=target_length,
)
assert "Label index out of range." in str(err.value)
@pytest.mark.parametrize(
    (
        "bbox_min",
        "bbox_max",
        "current_shape",
        "target_shape",
        "expected_lower",
        "expected_upper",
    ),
    [
        (
            (0, 0, 0),
            (64, 44, 40),
            (64, 44, 40),
            (64, 44, 40),
            (0, 0, 0),
            (0, 0, 0),
        ),
        (
            (0, 0, 0),
            (64, 44, 40),
            (64, 44, 40),
            (64, 64, 40),
            (0, 0, 0),
            (0, 0, 0),
        ),
        (
            (0, 30, 0),
            (20, 44, 40),
            (64, 44, 40),
            (20, 30, 30),
            (0, 14, 5),
            (44, 0, 5),
        ),
        (
            (0, 30, 0),
            (20, 44, 40),
            (65, 45, 41),
            (20, 30, 30),
            (0, 15, 5),
            (45, 0, 6),
        ),
    ],
    ids=["3d_same", "3d_no_crop", "3d_even", "3d_odd"],
)
def test_get_center_crop_shape_from_bbox(
    bbox_min: Tuple[int, ...],
    bbox_max: Tuple[int, ...],
    current_shape: Tuple[int, ...],
    target_shape: Tuple[int, ...],
    expected_lower: Tuple[int, ...],
    expected_upper: Tuple[int, ...],
) -> None:
    """Check crop amounts derived from a foreground bounding box.

    Args:
        bbox_min: [start_in_1st_spatial_dim, ...], inclusive, starts at zero.
        bbox_max: [end_in_1st_spatial_dim, ...], exclusive, starts at zero.
        current_shape: current shape of the image.
        target_shape: target shape of the image.
        expected_lower: expected crop on the lower side, per axis.
        expected_upper: expected crop on the upper side, per axis.
    """
    lower, upper = get_center_crop_shape_from_bbox(
        bbox_min, bbox_max, current_shape, target_shape
    )
    # tuple comparison is equivalent to asserting each side separately
    assert (lower, upper) == (expected_lower, expected_upper)
@pytest.mark.parametrize(
    ("label", "expected"),
    [
        (np.array([0, 1, 2, 3]), np.array([[1, 3]])),
        (np.array([1, 2, 3, 0]), np.array([[0, 2]])),
        (np.array([1, 2, 3, 4]), np.array([[0, 3]])),
        (np.array([0, 1, 2, 3, 4, 0, 0]), np.array([[1, 4]])),
        (
            np.array([[0, 1, 2, 3], [0, 1, 2, 3], [0, 0, 0, 0]]),
            np.array([[0, 1], [1, 3]]),
        ),
    ],
    ids=[
        "1d-left",
        "1d-right",
        "1d-none",
        "1d-both",
        "2d",
    ],
)
def test_get_foreground_range(
    label: np.ndarray,
    expected: np.ndarray,
) -> None:
    """Check the per-axis foreground index range of an integer label map.

    Args:
        label: label with int values, not one-hot.
        expected: expected [min, max] index range, one row per axis.
    """
    got = get_foreground_range(label=label)
    chex.assert_trees_all_equal(got, expected)
| 9,387 | 23.968085 | 77 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_metric_centroid_distance.py | """Test centroid distance functions."""
from typing import Optional, Tuple
import chex
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.metric import centroid_distance
from imgx.metric.centroid import get_centroid, get_coordinate_grid
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report two CPU devices so tests exercise
    multi-device code paths on a single machine.
    """
    fake.set_n_cpu_devices(2)
class TestGrid(chex.TestCase):
    """Test get_coordinate_grid."""

    @chex.variants(without_jit=True)
    @parameterized.named_parameters(
        ("1d", (2,), np.asarray([[0.0, 1.0]])),
        (
            "2d",
            (3, 2),
            np.asarray(
                [
                    [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]],
                    [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
                ]
            ),
        ),
    )
    def test_values(self, shape: Tuple[int, ...], expected: np.ndarray) -> None:
        """Check exact coordinate values for small grids.

        Args:
            shape: shape of the grid, (d1, ..., dn).
            expected: expected coordinates, one leading entry per axis.
        """
        got = self.variant(get_coordinate_grid)(shape=shape)
        chex.assert_trees_all_equal(got, expected)
class TestCentroid(chex.TestCase):
    """Test get_centroid."""

    @chex.all_variants
    @parameterized.named_parameters(
        ("1d-1class", 1, (4,)),
        ("1d-2classes", 2, (4,)),
        ("2d-1class", 1, (4, 5)),
        ("2d-2classes", 2, (4, 3)),
        ("3d-3classes", 3, (4, 3, 5)),
    )
    def test_shapes(
        self,
        num_classes: int,
        shape: Tuple[int, ...],
    ) -> None:
        """Check output shapes for an all-foreground mask.

        Args:
            num_classes: number of classes.
            shape: shape of the grid, (d1, ..., dn).
        """
        batch = 2
        centroid, nan_mask = self.variant(get_centroid)(
            mask=np.ones((batch, *shape, num_classes)),
            grid=get_coordinate_grid(shape),
        )
        # one coordinate per spatial axis, per class
        chex.assert_shape(centroid, (batch, len(shape), num_classes))
        chex.assert_shape(nan_mask, (batch, num_classes))

    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d-1class",
            np.asarray([False, True, True, False])[..., None],
            (4,),
            np.asarray([1.5])[..., None],
        ),
        (
            "1d-1class-empty",
            np.asarray([False, False, False, False])[..., None],
            (4,),
            np.asarray([np.nan])[..., None],
        ),
        (
            "1d-2classes",
            np.asarray(
                [[False, True], [True, True], [True, False], [False, False]]
            ),
            (4,),
            np.asarray([[1.5, 0.5]]),
        ),
        (
            "1d-3classes",
            np.asarray(
                [
                    [False, True, True],
                    [True, True, True],
                    [True, False, False],
                    [False, False, False],
                ]
            ),
            (4,),
            np.asarray([[1.5, 0.5, 0.5]]),
        ),
        (
            "1d-3classes-with-nan",
            np.asarray(
                [
                    [False, True, False],
                    [True, True, False],
                    [True, False, False],
                    [False, False, False],
                ]
            ),
            (4,),
            np.asarray([[1.5, 0.5, np.nan]]),
        ),
        (
            "2d-1class",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            (4, 5),
            np.asarray([2.0 / 3.0, 2.0])[..., None],
        ),
        (
            "2d-nan",
            np.array(
                [
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            (4, 5),
            np.asarray([np.nan, np.nan])[..., None],
        ),
    )
    def test_values(
        self, mask: np.ndarray, shape: Tuple[int, ...], expected: np.ndarray
    ) -> None:
        """Check centroid coordinates and the NaN mask on small masks.

        Args:
            mask: boolean mask on the image, (d1, ..., dn, num_classes).
            shape: shape of the grid, (d1, ..., dn).
            expected: expected coordinates, NaN marks empty classes.
        """
        centroid, nan_mask = self.variant(get_centroid)(
            mask=mask[None, ...],
            grid=get_coordinate_grid(shape),
        )
        chex.assert_trees_all_close(centroid[0, ...], expected)
        # a class is flagged iff any of its expected coordinates is NaN
        expected_nan_mask = np.sum(np.isnan(expected), axis=0) > 0
        chex.assert_trees_all_close(nan_mask[0, ...], expected_nan_mask)
class TestCentroidDistance(chex.TestCase):
    """Test centroid_distance."""

    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d-same-1class",
            np.asarray([False, True, True, False])[..., None],
            np.asarray([False, True, True, False])[..., None],
            None,
            np.asarray([0.0]),
        ),
        (
            "1d-same-1class-nan",
            np.asarray([False, False, False, False])[..., None],
            np.asarray([False, True, True, False])[..., None],
            None,
            np.asarray([np.nan]),
        ),
        (
            "1d-diff-1class",
            np.asarray([False, True, True, False])[..., None],
            np.asarray([False, True, False, False])[..., None],
            None,
            np.asarray([0.5]),
        ),
        (
            "1d-diff-2classes",
            np.asarray(
                [[False, True], [True, True], [True, False], [False, False]]
            ),
            np.asarray(
                [[False, True], [True, False], [True, False], [False, False]]
            ),
            None,
            np.asarray([0, 0.5]),
        ),
        (
            "1d-diff-2classes-heterogeneous",
            np.asarray(
                [[False, True], [True, True], [True, False], [False, False]]
            ),
            np.asarray(
                [[False, True], [True, False], [True, False], [False, False]]
            ),
            np.asarray([2]),
            np.asarray([0, 1.0]),
        ),
        (
            "1d-diff-2classes-heterogeneous-nan",
            np.asarray(
                [[False, True], [True, True], [True, False], [False, False]]
            ),
            np.asarray(
                [[False, True], [False, False], [False, False], [False, False]]
            ),
            np.asarray([2]),
            np.asarray([np.nan, 1.0]),
        ),
        (
            "2d-same-1class",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            None,
            np.asarray([0.0]),
        ),
        (
            "2d-diff-1class",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            None,
            # dist between (5/3, 3) and (2, 2.5)
            np.asarray([np.sqrt(1.0 / 9.0 + 0.25)]),
        ),
        (
            "2d-diff-1class-heterogeneous",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            np.asarray([1.0, 2.0]),
            # dist between (5/3, 3) and (2, 2.5)
            np.asarray([np.sqrt(1.0 / 9.0 + 1.0)]),
        ),
    )
    def test_values(
        self,
        mask_true: np.ndarray,
        mask_pred: np.ndarray,
        spacing: Optional[np.ndarray],
        expected: np.ndarray,
    ) -> None:
        """Check centroid distances on small masks.

        Args:
            mask_true: ground-truth mask, (d1, ..., dn, num_classes);
                the batch axis is added inside the test.
            mask_pred: predicted mask, same shape as mask_true.
            spacing: spacing of pixel/voxels along each dimension, (n,).
            expected: expected per-class distance, NaN for empty classes.
        """
        got = self.variant(centroid_distance)(
            mask_true=mask_true[None, ...],
            mask_pred=mask_pred[None, ...],
            grid=get_coordinate_grid(mask_true.shape[:-1]),
            spacing=spacing,
        )
        chex.assert_trees_all_close(got[0], expected)
| 10,714 | 28.68144 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_metric_area.py | """Test area functions."""
import chex
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.metric.area import class_proportion
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report two CPU devices so tests exercise
    multi-device code paths on a single machine.
    """
    fake.set_n_cpu_devices(2)
class TestClassProportion(chex.TestCase):
    """Test class_proportion."""

    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d-1class",
            np.asarray([False, True, True, False])[..., None],
            np.asarray([0.5])[..., None],
        ),
        (
            "1d-1class-empty",
            np.asarray([False, False, False, False])[..., None],
            np.asarray([0.0])[..., None],
        ),
        (
            "1d-2classes",
            np.asarray(
                [[False, True], [True, True], [True, False], [False, False]]
            ),
            np.asarray([[0.5, 0.5]]),
        ),
        (
            "2d-1class",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            )[..., None],
            np.asarray([3.0 / 20.0])[..., None],
        ),
    )
    def test_values(self, mask: np.ndarray, expected: np.ndarray) -> None:
        """Check per-class foreground proportions on small masks.

        Args:
            mask: boolean mask, (d1, ..., dn, num_classes);
                the batch axis is added inside the test.
            expected: expected per-class proportion.
        """
        got = self.variant(class_proportion)(mask=mask[None, ...])
        chex.assert_trees_all_close(got, expected)
| 1,794 | 27.492063 | 76 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_model_basic.py | """Test basic functions for model."""
import chex
import jax
from absl.testing import parameterized
from chex._src import fake
from imgx.model.basic import sinusoidal_positional_embedding
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report two CPU devices so tests exercise
    multi-device code paths on a single machine.
    """
    fake.set_n_cpu_devices(2)
class TestSinusoidalPositionalEmbedding(chex.TestCase):
    """Test the function sinusoidal_positional_embedding."""

    @chex.variants(
        with_jit=True, without_jit=True, with_device=True, without_device=True
    )
    @parameterized.named_parameters(
        ("case 1", 2, 4, 5),
        ("case 2", 3, 8, 10000),
    )
    def test_shapes(self, batch_size: int, dim: int, max_period: int) -> None:
        """Check the embedding shape under each jit/device variant.

        Args:
            batch_size: batch size.
            dim: embedding dimension, assume to be evenly divided by two.
            max_period: controls the minimum frequency of the embeddings.
        """
        x = jax.random.uniform(jax.random.PRNGKey(0), shape=(batch_size,))
        # dim and max_period are compile-time constants, hence static
        embed_fn = self.variant(
            sinusoidal_positional_embedding, static_argnums=(1, 2)
        )
        got = embed_fn(x, dim=dim, max_period=max_period)
        chex.assert_shape(got, (batch_size, dim))
| 1,478 | 27.442308 | 78 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_diffusion_gaussian.py | """Test Gaussian diffusion related classes and functions."""
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.diffusion.gaussian_diffusion import (
DiffusionBetaSchedule,
DiffusionModelOutputType,
DiffusionModelVarianceType,
DiffusionSpace,
GaussianDiffusion,
extract_and_expand,
)
from imgx.model import Unet3dTime
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report two CPU devices so tests exercise
    multi-device code paths on a single machine.
    """
    fake.set_n_cpu_devices(2)
class TestExtractAndExpand(chex.TestCase):
    """Test extract_and_expand."""

    @chex.variants(without_jit=True, with_device=True, without_device=True)
    @parameterized.named_parameters(
        ("1d", 1),
        ("2d", 2),
        ("3d", 3),
    )
    def test_shapes(
        self,
        ndim: int,
    ) -> None:
        """Check the output is expanded to the requested rank.

        Args:
            ndim: number of dimensions.
        """
        batch_size = 2
        betas = jnp.array([0, 0.2, 0.5, 1.0])
        t = jax.random.randint(
            jax.random.PRNGKey(0),
            shape=(batch_size,),
            minval=0,
            maxval=len(betas),
        )
        got = self.variant(extract_and_expand)(arr=betas, t=t, ndim=ndim)
        # trailing singleton axes so the result broadcasts with ndim-rank data
        chex.assert_shape(got, (batch_size,) + (1,) * (ndim - 1))
class TestGaussianDiffusion(chex.TestCase):
    """Test the class GaussianDiffusion."""
    # dummy batch size shared by all tests
    batch_size = 2
    # unet
    in_channels = 1
    num_classes = 2
    num_channels = (1, 2)
    # diffusion schedule hyper-parameters shared by all tests
    num_timesteps = 5
    num_timesteps_beta = 1001
    beta_schedule = DiffusionBetaSchedule.QUADRADIC
    beta_start = 0.0001
    beta_end = 0.02
    # samples and x_start predictions are clipped to [-x_limit, x_limit]
    x_limit = 1.0
    use_ddim = False
    @chex.variants(without_jit=True)
    def test_attributes(
        self,
    ) -> None:
        """Test attribute shape."""
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward() -> GaussianDiffusion:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion
        gd = forward()
        # every per-timestep schedule buffer must have length num_timesteps
        chex.assert_shape(gd.betas, (self.num_timesteps,))
        chex.assert_shape(gd.alphas_cumprod, (self.num_timesteps,))
        chex.assert_shape(gd.alphas_cumprod_prev, (self.num_timesteps,))
        chex.assert_shape(gd.alphas_cumprod_next, (self.num_timesteps,))
        chex.assert_shape(gd.sqrt_alphas_cumprod, (self.num_timesteps,))
        chex.assert_shape(
            gd.sqrt_one_minus_alphas_cumprod, (self.num_timesteps,)
        )
        chex.assert_shape(
            gd.log_one_minus_alphas_cumprod, (self.num_timesteps,)
        )
        chex.assert_shape(gd.sqrt_recip_alphas_cumprod, (self.num_timesteps,))
        chex.assert_shape(
            gd.sqrt_recip_alphas_cumprod_minus_one, (self.num_timesteps,)
        )
        chex.assert_shape(gd.posterior_mean_coeff_start, (self.num_timesteps,))
        chex.assert_shape(gd.posterior_mean_coeff_t, (self.num_timesteps,))
        chex.assert_shape(gd.posterior_variance, (self.num_timesteps,))
        chex.assert_shape(
            gd.posterior_log_variance_clipped, (self.num_timesteps,)
        )
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_q_mean_log_variance(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x_start: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.q_mean_log_variance(x_start=x_start, t=t)
        rng = jax.random.PRNGKey(0)
        rng_start, rng_t = jax.random.split(rng, num=2)
        dummy_x_start = jax.random.uniform(
            rng_start, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got_mean, got_log_var = forward(x_start=dummy_x_start, t=dummy_t)
        # log-variance is broadcastable: singleton per non-batch axis
        expanded_shape = (dummy_x_start.shape[0],) + (1,) * (
            dummy_x_start.ndim - 1
        )
        chex.assert_shape(got_mean, dummy_x_start.shape)
        chex.assert_shape(got_log_var, expanded_shape)
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_q_sample(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x_start: jnp.ndarray, noise: jnp.ndarray, t: jnp.ndarray
        ) -> jnp.ndarray:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.q_sample(x_start=x_start, noise=noise, t=t)
        rng = jax.random.PRNGKey(0)
        rng_start, rng_noise, rng_t = jax.random.split(rng, num=3)
        dummy_x_start = jax.random.uniform(
            rng_start, shape=(self.batch_size, *in_shape)
        )
        dummy_noise = jax.random.uniform(
            rng_noise, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got = forward(x_start=dummy_x_start, noise=dummy_noise, t=dummy_t)
        chex.assert_shape(got, dummy_x_start.shape)
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_q_posterior_mean_variance(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x_start: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.q_posterior_mean_variance(
                x_start=x_start, x_t=x_t, t=t
            )
        # NOTE(review): rng_start doubles as the split source key here
        rng_start = jax.random.PRNGKey(0)
        rng_start, rng_x_t, rng_t = jax.random.split(rng_start, num=3)
        dummy_x_start = jax.random.uniform(
            rng_start, shape=(self.batch_size, *in_shape)
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got_mean, got_log_var = forward(
            x_start=dummy_x_start, x_t=dummy_x_t, t=dummy_t
        )
        # log-variance is broadcastable: singleton per non-batch axis
        expanded_shape = (dummy_x_start.shape[0],) + (1,) * (
            dummy_x_start.ndim - 1
        )
        chex.assert_shape(got_mean, dummy_x_start.shape)
        chex.assert_shape(got_log_var, expanded_shape)
    @chex.all_variants
    @parameterized.product(
        in_shape=[
            (2,),
            (2, 3),
            (2, 3, 4),
        ],
        model_out_type=[
            DiffusionModelOutputType.X_START,
            DiffusionModelOutputType.X_PREVIOUS,
            DiffusionModelOutputType.EPSILON,
        ],
        model_var_type=[
            DiffusionModelVarianceType.FIXED_SMALL,
            DiffusionModelVarianceType.FIXED_LARGE,
            DiffusionModelVarianceType.LEARNED,
            DiffusionModelVarianceType.LEARNED_RANGE,
        ],
    )
    def test_p_mean_variance(
        self,
        in_shape: Tuple[int, ...],
        model_out_type: DiffusionModelOutputType,
        model_var_type: DiffusionModelVarianceType,
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
            model_out_type: define model output meaning.
            model_var_type: define p(x_{t-1} | x_t) variance.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            model_out: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=model_out_type,
                model_var_type=model_var_type,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.p_mean_variance(model_out=model_out, x_t=x_t, t=t)
        rng_out = jax.random.PRNGKey(0)
        rng_out, rng_x_t, rng_t = jax.random.split(rng_out, num=3)
        num_out_channels = self.num_classes
        # learned-variance models emit mean and variance, doubling channels
        if model_var_type in [
            DiffusionModelVarianceType.LEARNED,
            DiffusionModelVarianceType.LEARNED_RANGE,
        ]:
            num_out_channels *= 2
        model_out_shape = (self.batch_size, *in_shape, num_out_channels)
        dummy_model_out = jax.random.uniform(
            rng_out,
            shape=model_out_shape,
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape, self.num_classes)
        )
        # for t = 0, x_prev is not well-defined
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=1, maxval=self.num_timesteps
        )
        (
            got_x_start,
            got_model_mean,
            got_model_log_variance,
        ) = forward(model_out=dummy_model_out, x_t=dummy_x_t, t=dummy_t)
        expanded_shape = (dummy_x_t.shape[0],) + (1,) * (dummy_x_t.ndim - 1)
        # predicted x_start should never contain NaN
        assert (~jnp.isnan(got_x_start)).all()
        chex.assert_shape(got_x_start, dummy_x_t.shape)
        chex.assert_shape(got_model_mean, dummy_x_t.shape)
        if model_var_type in [
            DiffusionModelVarianceType.FIXED_SMALL,
            DiffusionModelVarianceType.FIXED_LARGE,
        ]:
            # variances are extended
            chex.assert_shape(got_model_log_variance, expanded_shape)
        else:
            chex.assert_shape(got_model_log_variance, dummy_x_t.shape)
        # check value range
        chex.assert_scalar_in(
            jnp.min(got_x_start).item(), -self.x_limit, self.x_limit
        )
        chex.assert_scalar_in(
            jnp.max(got_x_start).item(), -self.x_limit, self.x_limit
        )
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_p_sample(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            model_out: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.p_sample(model_out=model_out, x_t=x_t, t=t)
        rng_out = jax.random.PRNGKey(0)
        rng_out, rng_x_t, rng_t = jax.random.split(rng_out, num=3)
        model_out_shape = (self.batch_size, *in_shape)
        dummy_model_out = jax.random.uniform(
            rng_out,
            shape=model_out_shape,
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got_sample, got_x_start_pred = forward(
            model_out=dummy_model_out, x_t=dummy_x_t, t=dummy_t
        )
        chex.assert_shape(got_sample, dummy_x_t.shape)
        chex.assert_shape(got_x_start_pred, dummy_x_t.shape)
        # check value range
        chex.assert_scalar_in(
            jnp.min(got_sample).item(), -self.x_limit, self.x_limit
        )
        chex.assert_scalar_in(
            jnp.max(got_sample).item(), -self.x_limit, self.x_limit
        )
    @chex.all_variants
    def test_p_sample_mask(
        self,
    ) -> None:
        """Test output shape."""
        in_shape = (2, 3, 4)
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            image: jnp.ndarray,
            x_t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function for p_sample_mask.

            Args:
                image: (batch, w, h, d, in_channels).
                x_t: (batch, w, h, d, num_classes).

            Returns:
                p_sample_mask output.
            """
            # model consumes image and noisy mask concatenated channel-wise
            model = Unet3dTime(
                in_shape=in_shape,
                in_channels=self.in_channels + self.num_classes,
                out_channels=self.num_classes,
                num_channels=self.num_channels,
                num_timesteps=self.num_timesteps,
            )
            diffusion = GaussianDiffusion(
                model=model,
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.sample_mask(
                image=image,
                x_t=x_t,
            )
        rng_image = jax.random.PRNGKey(0)
        rng_image, rng_x_t = jax.random.split(rng_image)
        image_shape = (self.batch_size, *in_shape, self.in_channels)
        dummy_image = jax.random.uniform(
            rng_image,
            shape=image_shape,
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape, self.num_classes)
        )
        got_sample = forward(
            image=dummy_image,
            x_t=dummy_x_t,
        )
        chex.assert_shape(got_sample, dummy_x_t.shape)
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_variational_lower_bound(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            model_out: jnp.ndarray,
            x_start: jnp.ndarray,
            x_t: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.variational_lower_bound(
                model_out=model_out, x_start=x_start, x_t=x_t, t=t
            )
        rng_out = jax.random.PRNGKey(0)
        rng_out, rng_x_start, rng_x_t, rng_t = jax.random.split(rng_out, num=4)
        model_out_shape = (self.batch_size, *in_shape)
        dummy_model_out = jax.random.uniform(
            rng_out,
            shape=model_out_shape,
        )
        dummy_x_start = jax.random.uniform(
            rng_x_start, shape=(self.batch_size, *in_shape)
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got = forward(
            model_out=dummy_model_out,
            x_start=dummy_x_start,
            x_t=dummy_x_t,
            t=dummy_t,
        )
        # the bound is reduced to one scalar per batch element
        chex.assert_shape(got, (self.batch_size,))
| 19,693 | 33.250435 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_loss_cross_entropy.py | """Test dice loss functions."""
import chex
import jax
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.loss import mean_cross_entropy, mean_focal_loss
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report two CPU devices so tests exercise
    multi-device code paths on a single machine.
    """
    fake.set_n_cpu_devices(2)
class TestCrossEntropyLoss(chex.TestCase):
    """Test mean_cross_entropy."""

    # softmax probabilities of the logit triple (0, -1, -2)
    prob_0 = 1 / (1 + np.exp(-1) + np.exp(-2))
    prob_1 = np.exp(-1) / (1 + np.exp(-1) + np.exp(-2))
    prob_2 = np.exp(-2) / (1 + np.exp(-1) + np.exp(-2))

    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            np.mean(-np.log([prob_2, prob_1, prob_0])),
        ),
        (
            "2d",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            np.mean(-np.log([prob_2, prob_1, prob_1, prob_2])),
        ),
    )
    def test_values(
        self,
        logits: np.ndarray,
        targets: np.ndarray,
        expected: np.ndarray,
    ) -> None:
        """Check mean cross entropy against hand-computed values.

        Args:
            logits: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            expected: expected output.
        """
        # one-hot encode targets, (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(
            targets, num_classes=logits.shape[-1], axis=-1
        )
        got = self.variant(mean_cross_entropy)(
            logits=logits,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got, expected)
class TestFocalLoss(chex.TestCase):
    """Test mean_focal_loss."""

    # softmax probabilities of the logit triple (0, -1, -2)
    prob_0 = 1 / (1 + np.exp(-1) + np.exp(-2))
    prob_1 = np.exp(-1) / (1 + np.exp(-1) + np.exp(-2))
    prob_2 = np.exp(-2) / (1 + np.exp(-1) + np.exp(-2))

    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d-gamma=0.0",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            0.0,
            np.mean(-np.log([prob_2, prob_1, prob_0])),
        ),
        (
            "2d-gamma=0.0",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            0.0,
            np.mean(-np.log([prob_2, prob_1, prob_1, prob_2])),
        ),
        (
            "1d-gamma=1.2",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            1.2,
            np.mean(
                np.array(
                    [
                        -((1 - p) ** 1.2) * np.log(p)
                        for p in [prob_2, prob_1, prob_0]
                    ]
                )
            ),
        ),
        (
            "2d-gamma=1.2",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            1.2,
            np.mean(
                np.array(
                    [
                        -((1 - p) ** 1.2) * np.log(p)
                        for p in [prob_2, prob_1, prob_1, prob_2]
                    ]
                )
            ),
        ),
    )
    def test_values(
        self,
        logits: np.ndarray,
        targets: np.ndarray,
        gamma: float,
        expected: np.ndarray,
    ) -> None:
        """Check mean focal loss against hand-computed values.

        Args:
            logits: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            gamma: adjust class imbalance, 0 is equivalent to cross entropy.
            expected: expected output.
        """
        # one-hot encode targets, (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(
            targets, num_classes=logits.shape[-1], axis=-1
        )
        got = self.variant(mean_focal_loss)(
            logits=logits,
            mask_true=mask_true,
            gamma=gamma,
        )
        chex.assert_trees_all_close(got, expected)
| 5,017 | 27.511364 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_train_state.py | """Test TrainState and related functions."""
from pathlib import Path
from typing import Dict
import chex
import jax.numpy as jnp
import jax.random
import jmp
import pytest
from chex._src import fake
from imgx.device import broadcast_to_local_devices
from imgx.exp import train_state
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report two CPU devices so tests exercise
    multi-device code paths on a single machine.
    """
    fake.set_n_cpu_devices(2)
@pytest.fixture(name="dummy_train_state_dict")
def get_dummy_train_state_dict() -> Dict[str, jnp.ndarray]:
    """A dummy dict for train state attribute values.

    Returns:
        A dict of dummy values, one entry per TrainState attribute.
    """
    rng_key = jax.random.PRNGKey(0)
    uniform = jax.random.uniform
    return {
        "params": uniform(rng_key, (3, 5)),
        "network_state": uniform(rng_key, (4, 5)),
        "opt_state": uniform(rng_key, (5, 5)),
        "global_step": jnp.array(0, dtype=jnp.int32),
        "rng": jax.random.PRNGKey(0),
        "ema_params": uniform(rng_key, (3, 3)),
        "ema_network_state": uniform(rng_key, (3, 4)),
    }
def test_save_restore_array_tree(
    tmp_path: Path, dummy_train_state_dict: chex.ArrayTree
) -> None:
    """Round-trip an array tree through save/restore and compare.
    Args:
        tmp_path: fixture for temp path.
        dummy_train_state_dict: dummy data to save.
    """
    # save into a fresh sub-directory of the pytest temp path
    save_dir = tmp_path / "ckpt"
    save_dir.mkdir()
    train_state.save_array_tree(save_dir, dummy_train_state_dict)
    # restoring must reproduce the saved tree exactly
    loaded = train_state.restore_array_tree(save_dir)
    chex.assert_trees_all_equal(dummy_train_state_dict, loaded)
@pytest.mark.parametrize(
    "loss_scale_type",
    [
        "NoOpLossScale",
        "StaticLossScale",
        "DynamicLossScale",
    ],
)
def test_save_restore_ckpt(
    loss_scale_type: str, tmp_path: Path, dummy_train_state_dict: chex.ArrayTree
) -> None:
    """Test by saving and restoring.
    Args:
        loss_scale_type: NoOpLossScale, StaticLossScale, DynamicLossScale.
        tmp_path: fixture for temp path.
        dummy_train_state_dict: dummy data to save.
    """
    ckpt_dir = tmp_path / "ckpt"
    ckpt_dir.mkdir()
    # replicate every leaf across the (faked) local devices
    train_state_dict = jax.tree_map(
        broadcast_to_local_devices, dummy_train_state_dict
    )
    if loss_scale_type == "NoOpLossScale":
        loss_scale = jmp.NoOpLossScale()
    else:
        # Static/Dynamic loss scales carry an initial scale value
        scale = jmp.half_dtype()(2**15)
        loss_scale = getattr(jmp, loss_scale_type)(scale)
        loss_scale = broadcast_to_local_devices(loss_scale)
    dummy_train_state = train_state.TrainState(  # type: ignore[call-arg]
        params=train_state_dict["params"],
        network_state=train_state_dict["network_state"],
        opt_state=train_state_dict["opt_state"],
        loss_scale=loss_scale,
        global_step=train_state_dict["global_step"],
        rng=train_state_dict["rng"],
        ema_params=train_state_dict["ema_params"],
        ema_network_state=train_state_dict["ema_network_state"],
    )
    train_state.save_ckpt(dummy_train_state, ckpt_dir)
    restored_train_state = train_state.restore_ckpt(ckpt_dir)
    if loss_scale_type == "DynamicLossScale":
        # DynamicLossScale is compared field by field below, so mask it out
        # of the whole-tree comparison by overwriting it with a sentinel.
        dummy_loss_scale = dummy_train_state.loss_scale
        restored_loss_scale = restored_train_state.loss_scale
        dummy_train_state.loss_scale = -1
        restored_train_state.loss_scale = -1
        chex.assert_trees_all_equal(dummy_train_state, restored_train_state)
        chex.assert_trees_all_equal(
            dummy_loss_scale.loss_scale, restored_loss_scale.loss_scale
        )
        chex.assert_trees_all_equal(
            dummy_loss_scale.counter, restored_loss_scale.counter
        )
        chex.assert_trees_all_equal(
            dummy_loss_scale.period, restored_loss_scale.period
        )
        chex.assert_trees_all_equal(
            dummy_loss_scale.factor, restored_loss_scale.factor
        )
    else:
        chex.assert_trees_all_equal(dummy_train_state, restored_train_state)
| 3,895 | 30.934426 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_exp_model.py | """Test mixed precision related functions in factory."""
import haiku as hk
import pytest
from omegaconf import DictConfig
from imgx.exp.model import build_vision_model
from imgx.model import MODEL_CLS_NAME_TO_CONFIG_NAME, SUPPORTED_VISION_MODELS
# Minimal task config used to build models in tests; the diffusion section is
# only consumed by the *_time model variants.
DUMMY_TASK_CONFIG = {
    "name": "segmentation",
    "diffusion": {
        "num_timesteps": 4,
        "num_timesteps_sample": 20,
        "beta": {
            "beta_schedule": "linear",
            "beta_start": 0.0001,
            "beta_end": 0.02,
        },
        "model_out_type": "x_start",
        "model_var_type": "fixed_large",
        "x_space": "scaled_probs",
        "x_limit": 0.0,
    },
}
# Minimal model config: one entry per supported model family, each with a
# small channel progression to keep test models tiny.
DUMMY_MODEL_CONFIG = {
    "remat": False,
    "unet3d": {
        "num_channels": [1, 2, 4],
    },
    "unet3d_slice": {
        "num_channels": [1, 2, 4],
    },
    "unet3d_time": {
        "num_channels": [1, 2, 4],
    },
    "unet3d_slice_time": {
        "num_channels": [1, 2, 4],
    },
}
@hk.testing.transform_and_run()
@pytest.mark.parametrize(
    "model_class",
    SUPPORTED_VISION_MODELS,
    ids=SUPPORTED_VISION_MODELS,
)
def test_build_vision_model(model_class: str) -> None:
    """Test that every supported vision model can be built without error.
    Args:
        model_class: name of model class.
    """
    data_config = {
        "name": "male_pelvic_mr",
    }
    data_config = DictConfig(data_config)
    model_config = DictConfig(DUMMY_MODEL_CONFIG)
    task_config = DictConfig(DUMMY_TASK_CONFIG)
    # time-conditioned models are only used for the diffusion task
    if model_class.endswith("_time"):
        task_config["name"] = "diffusion"
    model_config["name"] = MODEL_CLS_NAME_TO_CONFIG_NAME[model_class]
    # building the model is the assertion: any misconfiguration raises
    build_vision_model(
        data_config=data_config,
        task_config=task_config,
        model_config=model_config,
    )
| 1,733 | 24.5 | 77 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_dataset_augmentation.py | """Test function for data augmentation."""
from typing import Tuple
import chex
import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx import IMAGE, LABEL
from imgx.datasets import FOREGROUND_RANGE
from imgx.datasets.augmentation import (
batch_apply_affine_to_grid,
batch_get_random_affine_matrix,
batch_random_affine_transform,
batch_resample_image_label,
get_affine_matrix,
get_rotation_matrix,
get_scaling_matrix,
get_translation_matrix,
)
from imgx.metric.centroid import get_coordinate_grid
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Split the host CPU into two fake devices for multi-device tests."""
    num_fake_devices = 2
    fake.set_n_cpu_devices(num_fake_devices)
class TestDeterministicAffineMatrix(chex.TestCase):
"""Test deterministic affine matrix."""
sin_30 = 0.5
cos_30 = np.sqrt(3) / 2
sqrt2 = np.sqrt(2)
@chex.all_variants
@parameterized.named_parameters(
(
"2d - 30 degrees rotation",
np.asarray([np.pi / 6]),
np.asarray(
[
[cos_30, -sin_30, 0.0],
[sin_30, cos_30, 0.0],
[0.0, 0.0, 1.0],
]
),
np.asarray(
[
[sqrt2 * np.cos(75 / 180 * np.pi)],
[sqrt2 * np.sin(75 / 180 * np.pi)],
[1.0],
]
),
),
(
"3d - no rotation",
np.asarray([0.0, 0.0, 0.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray([[1.0], [1.0], [1.0], [1.0]]),
),
(
"3d - x axis - 30 degrees rotation",
np.asarray([np.pi / 6, 0.0, 0.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, cos_30, -sin_30, 0.0],
[0.0, sin_30, cos_30, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[1.0],
[sqrt2 * np.cos(75 / 180 * np.pi)],
[sqrt2 * np.sin(75 / 180 * np.pi)],
[1.0],
]
),
),
(
"3d - y axis - 30 degrees rotation",
np.asarray([0.0, np.pi / 6, 0.0]),
np.asarray(
[
[cos_30, 0.0, sin_30, 0.0],
[0.0, 1.0, 0.0, 0.0],
[-sin_30, 0.0, cos_30, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[sqrt2 * np.sin(75 / 180 * np.pi)],
[1.0],
[sqrt2 * np.cos(75 / 180 * np.pi)],
[1.0],
]
),
),
(
"3d - z axis - 30 degrees rotation",
np.asarray([0.0, 0.0, np.pi / 6]),
np.asarray(
[
[cos_30, -sin_30, 0.0, 0.0],
[sin_30, cos_30, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[sqrt2 * np.cos(75 / 180 * np.pi)],
[sqrt2 * np.sin(75 / 180 * np.pi)],
[1.0],
[1.0],
]
),
),
)
    def test_rotation(
        self,
        radians: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test affine matrix values and rotated unit vector.
        Args:
            radians: values correspond to yz, xz, xy planes.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        # unit vector in homogeneous coordinates: 4 entries for 3D
        # (len(radians) == 3), 3 entries for 2D (len(radians) == 1)
        if len(radians) > 1:
            vector = jnp.array([[1.0], [1.0], [1.0], [1.0]])
        else:
            vector = jnp.array([[1.0], [1.0], [1.0]])
        got_affine_matrix = self.variant(get_rotation_matrix)(
            radians=radians,
        )
        chex.assert_trees_all_close(got_affine_matrix, expected_affine_matrix)
        # also check the matrix acts correctly on a concrete vector
        got_rotated_vector = jnp.matmul(got_affine_matrix, vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
@chex.all_variants
@parameterized.named_parameters(
(
"2d - shift",
np.asarray([-1.0, -2.0]),
np.asarray(
[
[1.0, 0.0, -1.0],
[0.0, 1.0, -2.0],
[0.0, 0.0, 1.0],
]
),
np.asarray([[0.0], [-1.0], [1.0]]),
),
(
"3d - no shift",
np.asarray([0.0, 0.0, 0.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray([[1.0], [1.0], [1.0], [1.0]]),
),
(
"3d - shift x axis",
np.asarray([1.0, 0.0, 0.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[2.0],
[1.0],
[1.0],
[1.0],
]
),
),
(
"3d - shift y axis",
np.asarray([0.0, 1.0, 0.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[1.0],
[2.0],
[1.0],
[1.0],
]
),
),
(
"3d - shift z axis",
np.asarray([0.0, 0.0, 1.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[1.0],
[1.0],
[2.0],
[1.0],
]
),
),
)
    def test_translation(
        self,
        shifts: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test affine matrix values and rotated unit vector.
        Args:
            shifts: correspond to each axis shift.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        # all-ones vector in homogeneous coordinates (extra trailing 1)
        vector = jnp.ones(shape=(len(shifts) + 1, 1))
        got_affine_matrix = self.variant(get_translation_matrix)(
            shifts=shifts,
        )
        chex.assert_trees_all_close(got_affine_matrix, expected_affine_matrix)
        # also check the matrix acts correctly on a concrete vector
        got_rotated_vector = jnp.matmul(got_affine_matrix, vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
@chex.all_variants
@parameterized.named_parameters(
(
"2d - scale",
np.asarray([2.0, 3.0]),
np.asarray(
[
[2.0, 0.0, 0.0],
[0.0, 3.0, 0.0],
[0.0, 0.0, 1.0],
]
),
np.asarray([[2.0], [3.0], [1.0]]),
),
(
"3d - no scale",
np.asarray([1.0, 1.0, 1.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray([[1.0], [1.0], [1.0], [1.0]]),
),
(
"3d - scale x axis",
np.asarray([2.0, 1.0, 1.0]),
np.asarray(
[
[2.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[2.0],
[1.0],
[1.0],
[1.0],
]
),
),
(
"3d - scale y axis",
np.asarray([1.0, 2.0, 1.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 2.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[1.0],
[2.0],
[1.0],
[1.0],
]
),
),
(
"3d - scale z axis",
np.asarray([1.0, 1.0, 2.0]),
np.asarray(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 2.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
),
np.asarray(
[
[1.0],
[1.0],
[2.0],
[1.0],
]
),
),
)
    def test_scaling(
        self,
        scales: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test affine matrix values and rotated unit vector.
        Args:
            scales: correspond to each axis scaling.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        # all-ones vector in homogeneous coordinates (extra trailing 1)
        vector = jnp.ones(shape=(len(scales) + 1, 1))
        got_affine_matrix = self.variant(get_scaling_matrix)(
            scales=scales,
        )
        chex.assert_trees_all_close(got_affine_matrix, expected_affine_matrix)
        # also check the matrix acts correctly on a concrete vector
        got_rotated_vector = jnp.matmul(got_affine_matrix, vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
@chex.all_variants
@parameterized.named_parameters(
(
"2d - rotate - scale - shift",
np.asarray([np.pi / 2]),
np.asarray([-1.0, 1.0]),
np.asarray([0.8, 1.2]),
np.asarray(
[
[0.0, -0.8, -1.0],
[1.2, 0.0, 1.0],
[0.0, 0.0, 1.0],
],
),
np.asarray([[-1.8], [2.2]]),
),
)
    def test_affine(
        self,
        radians: np.ndarray,
        shifts: np.ndarray,
        scales: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test affine matrix values and rotated unit vector.
        Args:
            radians: correspond to rotate around each axis.
            shifts: correspond to each axis shift.
            scales: correspond to each axis scaling.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        # all-ones vector in homogeneous coordinates (extra trailing 1)
        vector = jnp.ones(shape=(len(scales) + 1, 1))
        got_affine_matrix = self.variant(get_affine_matrix)(
            radians=radians,
            shifts=shifts,
            scales=scales,
        )
        chex.assert_trees_all_close(
            got_affine_matrix, expected_affine_matrix, atol=1e-6
        )
        # drop the last (homogeneous) row so the product is the plain
        # transformed coordinate vector
        got_rotated_vector = jnp.matmul(got_affine_matrix[:-1, :], vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
class TestRandomAffineMatrix(chex.TestCase):
"""Test random affine matrix sampling."""
@chex.all_variants
@parameterized.named_parameters(
(
"2d - batch size 1",
1,
np.asarray(
[
0.088,
]
),
np.asarray([20, 4]),
np.asarray([0.15, 0.15]),
(3, 3),
),
(
"2d - batch size 2",
2,
np.asarray(
[
0.088,
]
),
np.asarray([20, 4]),
np.asarray([0.15, 0.15]),
(3, 3),
),
(
"3d - batch size 2",
2,
np.asarray([0.088, 0.088, 0.088]),
np.asarray([20, 20, 4]),
np.asarray([0.15, 0.15, 0.15]),
(4, 4),
),
)
    def test_values(
        self,
        batch_size: int,
        max_rotation: np.ndarray,
        max_translation: np.ndarray,
        max_scaling: np.ndarray,
        expected_shape: Tuple,
    ) -> None:
        """Test affine matrix values.
        Test affine matrix shapes, and test random seed impact.
        Args:
            batch_size: number of samples in batch.
            max_rotation: maximum rotation in radians.
            max_translation: maximum translation in pixel/voxels.
            max_scaling: maximum scaling difference in pixel/voxels.
            expected_shape: expected shape of affine matrix.
        """
        # broadcast the per-sample limits to the batch dimension
        max_rotation = np.tile(max_rotation[None, ...], (batch_size, 1))
        max_translation = np.tile(max_translation[None, ...], (batch_size, 1))
        max_scaling = np.tile(max_scaling[None, ...], (batch_size, 1))
        # check output shape
        key1 = jax.random.PRNGKey(1)
        got1 = self.variant(batch_get_random_affine_matrix)(
            key=key1,
            max_rotation=max_rotation,
            min_translation=-max_translation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        chex.assert_shape(got1, (batch_size, *expected_shape))
        # if batch size > 1, each affine matrix should be different
        if batch_size > 1:
            diff = jnp.sum(jnp.abs(got1[1, ...] - got1[0, ...])).item()
            chex.assert_scalar_positive(diff)
        # same seed should provide same values
        got2 = self.variant(batch_get_random_affine_matrix)(
            key=key1,
            max_rotation=max_rotation,
            min_translation=-max_translation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        chex.assert_trees_all_equal(got1, got2)
        # different seeds should provide different values
        key3 = jax.random.PRNGKey(3)
        got3 = self.variant(batch_get_random_affine_matrix)(
            key=key3,
            max_rotation=max_rotation,
            min_translation=-max_translation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        diff = jnp.sum(jnp.abs(got1 - got3)).item()
        chex.assert_scalar_positive(diff)
class TestApplyAffineMatrix(chex.TestCase):
"""Test apply_affine_to_grid."""
@chex.all_variants
@parameterized.parameters(4, 1)
    def test_values(
        self,
        batch_size: int,
    ) -> None:
        """Test transformed grid values.
        Args:
            batch_size: number of samples in batch.
        """
        # coordinate grid of shape (n=2, d1=3, d2=2)
        grid = np.asarray(
            [
                # x
                [
                    [0.0, 0.0],
                    [1.0, 1.0],
                    [2.0, 2.0],
                ],
                # y
                [
                    [0.0, 1.0],
                    [0.0, 1.0],
                    [0.0, 1.0],
                ],
            ],
        )
        affine_matrix = np.asarray(
            [
                [2.0, 1.0, 0.0],
                [0.0, 3.0, 0.0],
                [0.0, 0.0, 1.0],
            ],
        )
        # the affine is applied around the grid centre, hence the
        # shift / transform / shift-back pattern in the expected values
        expected = np.asarray(
            [
                # shift -> 2x+y -> shift back
                [
                    [-1.5, -0.5],
                    [0.5, 1.5],
                    [2.5, 3.5],
                ],
                # shift -> 3y -> shift back
                [
                    [-1.0, 2.0],
                    [-1.0, 2.0],
                    [-1.0, 2.0],
                ],
            ],
        )
        # replicate the same affine / expectation for every batch element
        batch_affine_matrix = np.tile(
            affine_matrix[None, ...], (batch_size, 1, 1)
        )
        batch_expected = np.tile(
            expected[None, ...], (batch_size,) + (1,) * len(expected.shape)
        )
        got = self.variant(batch_apply_affine_to_grid)(
            grid=grid,
            affine_matrix=batch_affine_matrix,
        )
        chex.assert_trees_all_equal(got, batch_expected)
class TestResample(chex.TestCase):
"""Test apply_affine_to_grid."""
@chex.all_variants
@parameterized.named_parameters(
(
"2d - batch",
np.asarray(
[
[
[2.0, 1.0, 0.0],
[0.0, 3.0, 4.0],
],
[
[2.0, 1.0, 0.0],
[0.0, 3.0, 4.0],
],
],
),
np.asarray(
[
[
[2.0, 1.0, 0.0],
[0.0, 3.0, 4.0],
],
[
[2.0, 1.0, 0.0],
[0.0, 3.0, 4.0],
],
],
),
np.asarray(
[
# first image, un changed
[
# x axis
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]],
# y axis
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
],
# second image, changed
# (0.4, 0) x-axis linear interpolation
# (0, 0.6) y-axis linear interpolation
# (0.4, 1.6) x/y-axis linear interpolation
# (1.0, 3.0) out of boundary
[
# x axis
[[0.4, 0.0, 0.4], [1.0, 1.0, 1.0]],
# y axis
[[0.0, 0.6, 1.6], [0.0, 3.0, 2.0]],
],
]
), # (batch=2, n=2, d1=2, d2=3)
np.asarray(
[
[
[2.0, 1.0, 0.0],
[0.0, 3.0, 4.0],
],
[
[1.2, 1.4, 1.68],
[0.0, 0.0, 4.0],
],
],
),
np.asarray(
[
[
[2.0, 1.0, 0.0],
[0.0, 3.0, 4.0],
],
[
[2.0, 1.0, 0.0],
[0.0, 0.0, 4.0],
],
],
),
),
)
    def test_shapes(
        self,
        image: np.ndarray,
        label: np.ndarray,
        grid: np.ndarray,
        expected_image: np.ndarray,
        expected_label: np.ndarray,
    ) -> None:
        """Test resampled image and label values on a transformed grid.
        Args:
            image: input image batch.
            label: input label batch.
            grid: batch of grid with affine applied.
            expected_image: expected image.
            expected_label: expected label.
        """
        input_dict = {IMAGE: image, LABEL: label}
        got = self.variant(batch_resample_image_label)(
            input_dict=input_dict,
            grid=grid,
        )
        # image is linearly interpolated, label uses its own resampling rule
        expected = {IMAGE: expected_image, LABEL: expected_label}
        chex.assert_trees_all_close(got, expected)
class TestRandomAffineTransformation(chex.TestCase):
"""Test batch_random_affine_transform."""
@chex.all_variants
@parameterized.product(
(
{
"max_rotation": np.asarray([0.088, 0.088, 0.088]),
"max_translation": np.asarray([2, 3, 1]),
"max_scaling": np.asarray([0.05, 0.05, 0.05]),
"image_shape": (8, 12, 6),
},
{
"max_rotation": np.asarray([0.088]),
"max_translation": np.asarray([2, 3]),
"max_scaling": np.asarray([0.05, 0.05]),
"image_shape": (8, 12),
},
),
batch_size=[4, 1],
)
    def test_shapes(
        self,
        batch_size: int,
        max_rotation: np.ndarray,
        max_translation: np.ndarray,
        max_scaling: np.ndarray,
        image_shape: Tuple,
    ) -> None:
        """Test output shapes of the random affine transform.
        Also checks the label stays binary after resampling.
        Args:
            batch_size: number of samples in batch.
            max_rotation: maximum rotation in radians.
            max_translation: maximum translation in pixel/voxels.
            max_scaling: maximum scaling difference in pixel/voxels.
            image_shape: image spatial shape.
        """
        key = jax.random.PRNGKey(0)
        grid = get_coordinate_grid(shape=image_shape)
        image = jax.random.uniform(
            key=key, shape=(batch_size, *image_shape), minval=0, maxval=1
        )
        label = jax.random.uniform(
            key=key, shape=(batch_size, *image_shape), minval=0, maxval=1
        )
        # binarize by thresholding at the mean
        label = jnp.asarray(label > jnp.mean(label), dtype=np.float32)
        input_dict = {
            IMAGE: image,
            LABEL: label,
            FOREGROUND_RANGE: jnp.zeros((len(image_shape), 2)),
        }
        got = self.variant(batch_random_affine_transform)(
            key=key,
            input_dict=input_dict,
            grid=grid,
            max_rotation=max_rotation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        # check shapes
        assert len(got) == 2
        chex.assert_shape(got[IMAGE], (batch_size, *image_shape))
        chex.assert_shape(got[LABEL], (batch_size, *image_shape))
        # check label remains boolean
        assert jnp.unique(got[LABEL]).size == jnp.unique(label).size
| 23,132 | 29.081925 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_loss_dice.py | """Test dice loss functions."""
import chex
import jax
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.loss import mean_dice_loss
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
"""Fake multi-devices."""
fake.set_n_cpu_devices(2)
class TestDiceLoss(chex.TestCase):
"""Test dice_loss."""
prob_0 = 1 / (1 + np.exp(-1) + np.exp(-2))
prob_1 = np.exp(-1) / (1 + np.exp(-1) + np.exp(-2))
prob_2 = np.exp(-2) / (1 + np.exp(-1) + np.exp(-2))
@chex.all_variants
@parameterized.named_parameters(
(
"1d-with-background",
np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
np.array([[2, 1, 0]]),
True,
np.mean(
np.array(
[
[
1 - 2 * prob_0 / (3 * prob_0 + 1),
1 - 2 * prob_1 / (3 * prob_1 + 1),
1 - 2 * prob_2 / (3 * prob_2 + 1),
]
],
)
),
),
(
"1d-without-background",
np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
np.array([[2, 1, 0]]),
False,
np.mean(
np.array(
[
[
1 - 2 * prob_1 / (3 * prob_1 + 1),
1 - 2 * prob_2 / (3 * prob_2 + 1),
]
],
)
),
),
(
"1d-without-and-miss-background",
np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]]]),
np.array([[2, 1]]),
False,
np.mean(
np.array(
[
[
1 - 2 * prob_1 / (2 * prob_1 + 1),
1 - 2 * prob_2 / (2 * prob_2 + 1),
]
],
)
),
),
(
"2d-with-background",
np.array(
[
[
[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
[[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
],
]
),
np.array([[[2, 1], [0, 1]]]),
True,
np.mean(
np.array(
[
[
1 - 2 * prob_1 / (3 * prob_0 + prob_1 + 1),
1
- 2
* (prob_1 + prob_2)
/ (prob_0 + 2 * prob_1 + prob_2 + 2),
1 - 2 * prob_2 / (prob_1 + 3 * prob_2 + 1),
]
],
)
),
),
(
"2d-without-background",
np.array(
[
[
[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
[[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
],
]
),
np.array([[[2, 1], [0, 1]]]),
False,
np.mean(
np.array(
[
[
1
- 2
* (prob_1 + prob_2)
/ (prob_0 + 2 * prob_1 + prob_2 + 2),
1 - 2 * prob_2 / (prob_1 + 3 * prob_2 + 1),
]
],
)
),
),
(
"2d-with-empty-class-and-background",
np.array(
[
[
[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
[[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
],
]
),
np.array([[[0, 1], [0, 1]]]),
True,
np.mean(
np.array(
[
[
1
- 2 * (prob_0 + prob_1) / (3 * prob_0 + prob_1 + 2),
1
- 2
* (prob_1 + prob_2)
/ (prob_0 + 2 * prob_1 + prob_2 + 2),
]
],
)
),
),
(
"2d-with-empty-class-without-background",
np.array(
[
[
[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
[[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
],
]
),
np.array([[[0, 1], [0, 1]]]),
False,
np.array(
1 - 2 * (prob_1 + prob_2) / (prob_0 + 2 * prob_1 + prob_2 + 2),
),
),
)
    def test_values(
        self,
        logits: np.ndarray,
        targets: np.ndarray,
        include_background: bool,
        expected: np.ndarray,
    ) -> None:
        """Test dice loss values.
        Args:
            logits: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            include_background: include background as a separate class.
            expected: expected output.
        """
        num_classes = logits.shape[-1]
        # one-hot encode integer targets on the trailing class axis
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        # self.variant wraps the function per chex variant (jit / no-jit / pmap)
        got = self.variant(mean_dice_loss)(
            logits=logits,
            mask_true=mask_true,
            include_background=include_background,
        )
        chex.assert_trees_all_close(got, expected)
| 5,992 | 29.42132 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_diffusion_variance_schedule.py | """Test Gaussian diffusion related classes and functions."""
import chex
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.diffusion.variance_schedule import (
DiffusionBetaSchedule,
downsample_beta_schedule,
get_beta_schedule,
)
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Split the host CPU into two fake devices for multi-device tests."""
    num_fake_devices = 2
    fake.set_n_cpu_devices(num_fake_devices)
class TestGetBetaSchedule(chex.TestCase):
    """Test get_beta_schedule."""
    @parameterized.product(
        num_timesteps=[1, 4],
        beta_schedule=[
            DiffusionBetaSchedule.LINEAR,
            DiffusionBetaSchedule.QUADRADIC,
            DiffusionBetaSchedule.COSINE,
            DiffusionBetaSchedule.WARMUP10,
            DiffusionBetaSchedule.WARMUP50,
        ],
    )
    def test_shapes(
        self,
        num_timesteps: int,
        beta_schedule: DiffusionBetaSchedule,
    ) -> None:
        """Test output shape and endpoint values."""
        beta_start = 0.0
        beta_end = 0.2
        got = get_beta_schedule(
            num_timesteps=num_timesteps,
            beta_schedule=beta_schedule,
            beta_start=beta_start,
            beta_end=beta_end,
        )
        # one beta per timestep
        chex.assert_shape(got, (num_timesteps,))
        # schedule starts at beta_start; ends at beta_end when it has >1 step
        assert got[0] == beta_start
        if num_timesteps > 1:
            chex.assert_trees_all_close(got[-1], beta_end)
class TestDownsampleBetaSchedule(chex.TestCase):
    """Test downsample_beta_schedule."""
    @parameterized.named_parameters(
        ("same", 10, 10),
        ("downsample 11 to 6", 11, 6),
        ("downsample 101 to 5", 101, 5),
        ("downsample to two", 10, 2),
    )
    def test_values(
        self,
        num_timesteps: int,
        num_timesteps_to_keep: int,
    ) -> None:
        """Test output values and shapes."""
        betas = jnp.linspace(0.0, 1.0, num_timesteps)
        got = downsample_beta_schedule(
            betas, num_timesteps, num_timesteps_to_keep
        )
        # downsampling keeps the requested number of steps and preserves
        # both endpoints of the schedule
        chex.assert_shape(got, (num_timesteps_to_keep,))
        chex.assert_trees_all_close(got[0], betas[0])
        chex.assert_trees_all_close(got[-1], betas[-1])
| 2,208 | 27.320513 | 60 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_exp_mixed_precision.py | """Test mixed precision related functions in factory."""
import haiku as hk
import pytest
from imgx import model
from imgx.exp.mixed_precision import set_mixed_precision_policy
from imgx.model import MODEL_CLS_NAME_TO_CONFIG_NAME
from imgx.model import __all__ as all_model_classes
@pytest.mark.parametrize(
    "model_class",
    all_model_classes,
    ids=all_model_classes,
)
def test_set_mixed_precision_policy(model_class: str) -> None:
    """Test setting mixed precision policy for all supported models.
    Args:
        model_class: name of model class.
    """
    set_mixed_precision_policy(True, MODEL_CLS_NAME_TO_CONFIG_NAME[model_class])
    # clear policy, otherwise impact other tests
    hk.mixed_precision.clear_policy(hk.BatchNorm)
    hk.mixed_precision.clear_policy(hk.GroupNorm)
    hk.mixed_precision.clear_policy(hk.LayerNorm)
    hk.mixed_precision.clear_policy(hk.InstanceNorm)
    hk.mixed_precision.clear_policy(getattr(model, model_class))
| 938 | 31.37931 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_metric_surface_distance.py | """Test loss functions."""
from functools import partial
from typing import Callable, List, Tuple, Union
import chex
import jax
import numpy as np
from absl.testing import parameterized
from imgx.metric.surface_distance import (
aggregated_surface_distance,
average_surface_distance,
get_binary_mask_bounding_box,
get_mask_edges,
get_surface_distance,
hausdorff_distance,
normalized_surface_dice,
normalized_surface_dice_from_distances,
)
def create_spherical_seg_3d(
    radius: float,
    centre: Tuple[int, int, int],
    shape: Tuple[int, int, int],
) -> np.ndarray:
    """Return a binary 3D image with a sphere inside.
    Voxel values will be 1 inside the sphere, and 0 elsewhere.
    https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_surface_distance.py
    Args:
        radius: radius of sphere (in terms of number of voxels, can be partial)
        centre: location of sphere centre.
        shape: shape of image to create.
    """
    # open grids of signed offsets from the centre, one per spatial axis
    grid0, grid1, grid2 = np.ogrid[
        -centre[0] : shape[0] - centre[0],
        -centre[1] : shape[1] - centre[1],
        -centre[2] : shape[2] - centre[2],
    ]
    # squared Euclidean distance of every voxel from the centre
    squared_dist = grid0 * grid0 + grid1 * grid1 + grid2 * grid2
    # voxels within the radius are foreground (1), the rest background (0)
    return (squared_dist <= radius * radius).astype(np.int32)
def create_circle_seg_2d(
    radius: float,
    centre: Tuple[int, int],
    shape: Tuple[int, int],
) -> np.ndarray:
    """Return a binary 2D image with a sphere inside.
    Pixel values will be 1 inside the circle, and 0 elsewhere
    Args:
        radius: radius of sphere (in terms of number of pixels, can be partial)
        centre: location of sphere centre.
        shape: shape of image to create.
    """
    # open grids of signed offsets from the centre, one per spatial axis
    grid0, grid1 = np.ogrid[
        -centre[0] : shape[0] - centre[0],
        -centre[1] : shape[1] - centre[1],
    ]
    # pixels within the radius are foreground (1), the rest background (0)
    squared_dist = grid0 * grid0 + grid1 * grid1
    return (squared_dist <= radius * radius).astype(np.int32)
class TestBBox(chex.TestCase):
"""Test get_binary_mask_bounding_box."""
@parameterized.named_parameters(
(
"1d-int",
np.array([0, 1, 0, 1, 0]),
np.array([1]),
np.array([4]),
),
(
"1d-bool",
np.array([False, True, False, True, False]),
np.array([1]),
np.array([4]),
),
(
"1d-all-true",
np.array([True, True, True, True, True]),
np.array([0]),
np.array([5]),
),
(
"1d-all-false",
np.array([False, False, False, False, False]),
np.array([-1]),
np.array([-1]),
),
(
"2d-1x5",
np.array([[0, 1, 0, 1, 0]]),
np.array([0, 1]),
np.array([1, 4]),
),
(
"2d-2x5",
np.array([[0, 1, 0, 1, 0], [1, 1, 0, 1, 0]]),
np.array([0, 0]),
np.array([2, 4]),
),
(
"2d-2x5-all-false",
np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]),
np.array([-1, -1]),
np.array([-1, -1]),
),
)
    def test_values(
        self,
        mask: np.ndarray,
        expected_bbox_min: np.ndarray,
        expected_bbox_max: np.ndarray,
    ) -> None:
        """Test bounding box values of a binary mask.
        An all-false mask yields -1 for both bounds (see parameters above).
        Args:
            mask: binary mask with only spatial axes.
            expected_bbox_min: expected bounding box min, inclusive.
            expected_bbox_max: expected bounding box max, exclusive.
        """
        got_bbox_min, got_bbox_max = get_binary_mask_bounding_box(
            mask=mask,
        )
        chex.assert_trees_all_close(got_bbox_min, expected_bbox_min)
        chex.assert_trees_all_close(got_bbox_max, expected_bbox_max)
class TestMaskEdge(chex.TestCase):
"""Test get_mask_edges."""
@parameterized.named_parameters(
(
"2d-same-smaller",
create_circle_seg_2d(radius=2, centre=(4, 4), shape=(7, 7)),
create_circle_seg_2d(radius=2, centre=(4, 4), shape=(7, 7)),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
),
(
"2d-diff-smaller",
create_circle_seg_2d(radius=2, centre=(4, 4), shape=(7, 7)),
create_circle_seg_2d(radius=1, centre=(4, 4), shape=(7, 7)),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
np.array(
[
[False, False, False, False, False],
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
]
),
),
(
"2d-shift",
create_circle_seg_2d(radius=1, centre=(4, 4), shape=(7, 7)),
create_circle_seg_2d(radius=1, centre=(3, 4), shape=(7, 7)),
np.array(
[
[False, False, False],
[False, True, False],
[True, False, True],
[False, True, False],
]
),
np.array(
[
[False, True, False],
[True, False, True],
[False, True, False],
[False, False, False],
]
),
),
(
"2d-zero",
np.zeros((5, 5)),
np.zeros((5, 5)),
np.zeros((5, 5), dtype=np.bool_),
np.zeros((5, 5), dtype=np.bool_),
),
)
    def test_values(
        self,
        mask_pred: np.ndarray,
        mask_true: np.ndarray,
        expected_edge_pred: np.ndarray,
        expected_edge_true: np.ndarray,
    ) -> None:
        """Test edge extraction values for a pair of binary masks.
        Note the returned edges are cropped to the union bounding box of the
        two masks, so their shape can be smaller than the inputs.
        Args:
            mask_pred: the predicted binary mask.
            mask_true: the ground truth binary mask.
            expected_edge_pred: the predicted binary edge.
            expected_edge_true: the ground truth binary edge.
        """
        got_edge_pred, got_edge_true = get_mask_edges(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got_edge_pred, expected_edge_pred)
        chex.assert_trees_all_close(got_edge_true, expected_edge_true)
class TestSurfaceDistance(chex.TestCase):
"""Test surface_distance related functions."""
@parameterized.product(
ndims=[2, 3],
func=[
partial(average_surface_distance, spacing=None),
partial(hausdorff_distance, percentile=100, spacing=None),
],
)
    def test_nan_distance(
        self,
        ndims: int,
        func: Callable,
    ) -> None:
        """Test average_surface_distance returns nan given empty inputs.
        Args:
            ndims: number of spatial dimensions.
            func: function to test.
        """
        batch = 2
        num_classes = 4
        # build dummy input having non-zero edges
        shape = (batch,) + (num_classes,) * ndims
        mask_true = np.zeros(shape)
        for i in range(num_classes):
            mask_true[:, i, ...] = i
        mask_true = np.array(
            jax.nn.one_hot(
                x=mask_true,
                num_classes=num_classes,
                axis=-1,
            )
        )
        # an all-zero mask has no surface, so any distance involving it
        # must be nan, regardless of which side is empty
        mask_pred = np.zeros_like(mask_true)
        got = func(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        assert np.isnan(got).all()
        got = func(
            mask_pred=mask_true,
            mask_true=mask_pred,
        )
        assert np.isnan(got).all()
        got = func(
            mask_pred=mask_pred,
            mask_true=mask_pred,
        )
        assert np.isnan(got).all()
@parameterized.product(
ndims=[2, 3],
func=[
partial(average_surface_distance, spacing=None),
partial(hausdorff_distance, percentile=100, spacing=None),
],
)
    def test_zero_distance(
        self,
        ndims: int,
        func: Callable,
    ) -> None:
        """Test average_surface_distance returns zero given same inputs.
        Args:
            ndims: number of spatial dimensions.
            func: function to test.
        """
        batch = 2
        num_classes = 4
        # build dummy input having non-zero edges
        shape = (batch,) + (num_classes,) * ndims
        mask_true = np.zeros(shape)
        for i in range(num_classes):
            mask_true[:, i, ...] = i
        mask_true = np.array(
            jax.nn.one_hot(
                x=mask_true,
                num_classes=num_classes,
                axis=-1,
            )
        )
        # identical prediction and ground truth => distance 0 for every
        # class of every batch element
        got = func(mask_pred=mask_true, mask_true=mask_true)
        expected = np.zeros((batch, num_classes))
        assert np.array_equal(got, expected)
@parameterized.named_parameters(
(
"2d-4x3",
np.array(
[
[False, False, False],
[False, True, False],
[True, False, True],
[False, True, False],
]
),
np.array(
[
[False, True, False],
[True, False, True],
[False, True, False],
[False, False, False],
]
),
(1.0, 1.0),
np.array([1.0, 1.0, 1.0, 1.0]),
),
(
"2d-5x5",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
(1.0, 1.0),
np.array(
[
0.0,
0.0,
0.0,
np.sqrt(2),
np.sqrt(2),
np.sqrt(2),
np.sqrt(2),
2.0,
]
),
),
(
"2d-5x5-heterogeneous-1",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
(1.0, 2.0),
np.array(
[
0.0,
0.0,
0.0,
np.sqrt(5),
np.sqrt(5),
2.0, # via x axis it's shorter
2.0, # via x axis it's shorter
2.0,
]
),
),
(
"2d-5x5-heterogeneous-2",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 2.0),
np.array(
[
0.0,
0.0,
0.0,
2.0, # via x axis it's shorter
]
),
),
(
"2d-6x5",
np.array(
[
[False, True, True, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 1.0),
np.array(
[
np.sqrt(10),
3.0,
np.sqrt(10),
np.sqrt(8),
np.sqrt(8),
np.sqrt(2),
np.sqrt(2),
0.0,
]
),
),
)
def test_surface_distance(
self,
edge_pred: np.ndarray,
edge_true: np.ndarray,
spacing: Tuple[float, ...],
expected: np.ndarray,
) -> None:
"""Test get_surface_distance with accurate expected values.
Args:
edge_pred: the predicted binary edge.
edge_true: the ground truth binary edge.
spacing: spacing of pixel/voxels along each dimension.
expected: surface distance, 1D array of len = edge size.
"""
got = get_surface_distance(
edge_pred=edge_pred,
edge_true=edge_true,
spacing=spacing,
)
assert np.array_equal(got, expected)
@parameterized.named_parameters(
(
# distances from pred to true
# 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
# distances from true to pred
# 0.0, 0.0, 0.0, 2.0
"2d-5x5-heterogeneous-asymmetric-1",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
(1.0, 2.0),
False,
[
partial(np.percentile, q=0),
partial(np.percentile, q=100),
],
[1, 1],
np.array(
[
0.0,
np.sqrt(5),
]
),
),
(
# distances from pred to true
# 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
# distances from true to pred
# 0.0, 0.0, 0.0, 2.0
"2d-5x5-heterogeneous-asymmetric-2",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 2.0),
False,
[
partial(np.percentile, q=0),
partial(np.percentile, q=100),
],
[1, 1],
np.array(
[
0.0,
2.0,
]
),
),
(
# distances from pred to true
# 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
# distances from true to pred
# 0.0, 0.0, 0.0, 2.0
"2d-5x5-heterogeneous-symmetric",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 2.0),
True,
[
partial(np.percentile, q=0),
partial(np.percentile, q=100),
normalized_surface_dice_from_distances,
],
[1, 1, 2],
np.array(
[
0.0,
np.sqrt(5.0),
0.5,
]
),
),
)
def test_agg_surface_distance(
self,
mask_pred: np.ndarray,
mask_true: np.ndarray,
spacing: Tuple[float, ...],
symmetric: bool,
agg_funcs: Union[Callable, List[Callable]],
num_args: Union[int, List[int]],
expected: np.ndarray,
) -> None:
"""Test get_surface_distance with accurate expected values.
Args:
mask_pred: predictions, without batch and class axes.
mask_true: targets, without batch and class axes.
spacing: spacing of pixel/voxels along each dimension.
symmetric: the distance is symmetric to (pred, true) means swapping
the masks provides the same value.
agg_funcs: a function or a list of functions
to aggregate a list of distances.
num_args: a int or a list of ints, corresponding to number of
arguments for agg_fn_list.
expected: surface distance, 1D array of len = edge size.
"""
got = aggregated_surface_distance(
mask_pred=mask_pred[None, ..., None],
mask_true=mask_true[None, ..., None],
agg_fns=agg_funcs,
num_args=num_args,
spacing=spacing,
symmetric=symmetric,
)
assert np.array_equal(got[:, 0, 0], expected)
@parameterized.named_parameters(
(
"monai_3d_example1",
create_spherical_seg_3d(
radius=33, centre=(19, 33, 22), shape=(99, 99, 99)
),
create_spherical_seg_3d(
radius=33, centre=(20, 33, 22), shape=(99, 99, 99)
),
(1.0, 1.0, 1.0),
False,
0.3483278807706289,
),
(
"monai_3d_example2",
create_spherical_seg_3d(
radius=20, centre=(20, 33, 22), shape=(99, 99, 99)
),
create_spherical_seg_3d(
radius=40, centre=(20, 33, 22), shape=(99, 99, 99)
),
(1.0, 1.0, 1.0),
False,
12.040033513150455,
),
)
def test_average_surface_distance(
self,
mask_pred: np.ndarray,
mask_true: np.ndarray,
spacing: Tuple[float, ...],
symmetric: bool,
expected: float,
) -> None:
"""Test average_surface_distance with accurate expected values.
https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_surface_distance.py
Args:
mask_pred: predictions, without batch and class axes.
mask_true: targets, without batch and class axes.
spacing: spacing of pixel/voxels along each dimension.
symmetric: the distance is symmetric to (pred, true) means swapping
the masks provides the same value.
expected: expected value.
"""
got = average_surface_distance(
mask_pred=mask_pred[None, ..., None],
mask_true=mask_true[None, ..., None],
spacing=spacing,
symmetric=symmetric,
)
assert np.isclose(np.mean(got), expected)
@parameterized.named_parameters(
(
"monai_3d_example1",
create_spherical_seg_3d(
radius=20, centre=(20, 20, 20), shape=(99, 99, 99)
),
create_spherical_seg_3d(
radius=20, centre=(10, 20, 20), shape=(99, 99, 99)
),
(1.0, 1.0, 1.0),
False,
10,
),
(
"2d_same_center_diff_radii",
create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
create_circle_seg_2d(radius=20, centre=(50, 50), shape=(99, 99)),
(1.0, 1.0),
False,
10,
),
(
"2d_diff_centers_same_radius",
create_circle_seg_2d(radius=20, centre=(50, 51), shape=(99, 99)),
create_circle_seg_2d(radius=20, centre=(50, 50), shape=(99, 99)),
(1.0, 1.0),
False,
1,
),
(
"2d_diff_centers_diff_radii_asymmetric1",
create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
(1.0, 1.0),
False,
5,
),
(
"2d_diff_centers_diff_radii_asymmetric2",
create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
(1.0, 1.0),
False,
15,
),
(
"2d_diff_centers_diff_radii_symmetric1",
create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
(1.0, 1.0),
True,
15,
),
(
"2d_diff_centers_diff_radii_symmetric2",
create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
(1.0, 1.0),
True,
15,
),
)
def test_max_hausdorff_distance(
self,
mask_pred: np.ndarray,
mask_true: np.ndarray,
spacing: Tuple[float, ...],
symmetric: bool,
expected: float,
) -> None:
"""Test hausdorff_distance with 100 percentile.
Some test cases come from
https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_hausdorff_distance.py
Args:
mask_pred: predictions, without batch and class axes.
mask_true: targets, without batch and class axes.
spacing: spacing of pixel/voxels along each dimension.
symmetric: the distance is symmetric to (pred, true) means swapping
the masks provides the same value.
expected: expected value.
"""
got = hausdorff_distance(
mask_pred=mask_pred[None, ..., None],
mask_true=mask_true[None, ..., None],
percentile=100,
spacing=spacing,
symmetric=symmetric,
)
assert np.isclose(np.mean(got), expected)
@parameterized.named_parameters(
(
# distances from pred to true
# 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
# distances from true to pred
# 0.0, 0.0, 0.0, 2.0
"2d-5x5-heterogeneous-1mm",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 2.0),
1.0,
np.array(
[
0.5,
]
),
),
(
# distances from pred to true
# 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
# distances from true to pred
# 0.0, 0.0, 0.0, 2.0
"2d-5x5-heterogeneous-1.5mm",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 2.0),
1.5,
np.array(
[
0.5,
]
),
),
(
# distances from pred to true
# 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
# distances from true to pred
# 0.0, 0.0, 0.0, 2.0
"2d-5x5-heterogeneous-2mm",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 2.0),
2.0,
np.array(
[
10.0 / 12.0,
]
),
),
(
# distances from pred to true
# 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
# distances from true to pred
# 0.0, 0.0, 0.0, 2.0
"2d-5x5-heterogeneous-2.24mm",
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[False, False, True, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
),
np.array(
[
[False, False, True, False, False],
[False, True, False, True, False],
[True, False, False, False, True],
[False, True, False, True, False],
[False, False, True, False, False],
]
),
(1.0, 2.0),
2.24, # sqrt(5) = 2.236...
np.array(
[
1.0,
]
),
),
)
def test_normalized_surface_dice(
self,
mask_pred: np.ndarray,
mask_true: np.ndarray,
spacing: Tuple[float, ...],
tolerance_mm: float,
expected: float,
) -> None:
"""Test average_surface_distance with accurate expected values.
https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_surface_distance.py
Args:
mask_pred: predictions, without batch and class axes.
mask_true: targets, without batch and class axes.
spacing: spacing of pixel/voxels along each dimension.
tolerance_mm: tolerance value to consider surface being overlapping.
expected: expected value.
"""
got = normalized_surface_dice(
mask_pred=mask_pred[None, ..., None],
mask_true=mask_true[None, ..., None],
spacing=spacing,
tolerance_mm=tolerance_mm,
)
assert np.isclose(np.mean(got), expected)
| 31,405 | 31.444215 | 88 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_metric_dice.py | """Test dice score metric related functions."""
import chex
import jax
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.metric import dice_score, iou
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Emulate two CPU devices so multi-device code paths are exercised."""
    num_devices = 2
    fake.set_n_cpu_devices(num_devices)
class TestDiceScore(chex.TestCase):
    """Test dice_score."""
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d",
            np.array([[[0.2, 0.3, 0.5], [0.0, 1.0, 0.0]]]),
            np.array([[2, 1]]),
            np.array(
                [
                    [
                        0.0,  # no target on background
                        2.0 / 2.3,
                        1.0 / 1.5,
                    ]
                ],
            ),
        ),
        (
            "2d",
            np.array(
                [
                    [
                        [[0.2, 0.3, 0.5], [0.0, 1.0, 0.0]],
                        [[0.9, 0.0, 0.1], [0.5, 0.1, 0.4]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            np.array(
                [
                    [
                        1.8 / 2.6,
                        2.2 / 3.4,
                        1.0 / 2.0,
                    ]
                ],
            ),
        ),
    )
    def test_values(
        self,
        mask_pred: np.ndarray,
        targets: np.ndarray,
        expected: np.ndarray,
    ) -> None:
        """Test dice score values.
        Args:
            mask_pred: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            expected: expected output.
        """
        num_classes = mask_pred.shape[-1]
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        got = self.variant(dice_score)(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got, expected)
class TestIOU(chex.TestCase):
    """Test iou."""
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d",
            np.array([[[0.2, 0.3, 0.5], [0.0, 1.0, 0.0]]]),
            np.array([[2, 1]]),
            np.array(
                [
                    [
                        0.0,  # no target on background
                        1.0 / 1.3,
                        0.5 / 1.0,
                    ]
                ],
            ),
        ),
        (
            "2d",
            np.array(
                [
                    [
                        [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
                        [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            np.array(
                [
                    [
                        0.5,
                        0.5,
                        1.0,
                    ]
                ],
            ),
        ),
        (
            # class 2 is absent from both prediction and target,
            # so its IOU is expected to be nan
            "2d-nan",
            np.array(
                [
                    [
                        [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]],
                        [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[0, 1], [0, 1]]]),
            np.array(
                [
                    [
                        1.0 / 3.0,
                        1.0 / 3.0,
                        np.nan,
                    ]
                ],
            ),
        ),
    )
    def test_values(
        self,
        mask_pred: np.ndarray,
        targets: np.ndarray,
        expected: np.ndarray,
    ) -> None:
        """Test IOU values.
        Args:
            mask_pred: soft mask, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            expected: expected output.
        """
        num_classes = mask_pred.shape[-1]
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        got = self.variant(iou)(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got, expected)
| 4,360 | 25.430303 | 77 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_model_unet_3d_time.py | """Test Unet related classes and functions."""
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.model import Unet3dSliceTime, Unet3dTime
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Pretend there are two CPU devices for the tests in this module."""
    n_cpu = 2
    fake.set_n_cpu_devices(n_cpu)
class TestTimeUnet3d(chex.TestCase):
    """Test the classes Unet3dTime and Unet3dSliceTime."""
    batch_size = 2  # dummy batch size shared by all tests
    in_channels = 1
    out_channels = 2
    num_timesteps = 4
    @parameterized.product(
        (
            {
                "in_shape": (15, 16, 17),
                "kernel_size": 3,
                "scale_factor": 2,
            },
            {
                "in_shape": (13, 14, 15),
                "kernel_size": 5,
                "scale_factor": 1,
            },
            {
                "in_shape": (29, 30, 31),
                "kernel_size": 5,
                "scale_factor": 2,
            },
            {
                "in_shape": (53, 54, 55),
                "kernel_size": 5,
                "scale_factor": 3,
            },
        ),
        model_cls=[Unet3dTime, Unet3dSliceTime],
    )
    def test_output_shape(
        self,
        in_shape: Tuple[int, int, int],
        kernel_size: int,
        scale_factor: int,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape.
        Args:
            in_shape: input shape.
            kernel_size: convolution kernel size, the value(s) should be odd.
            scale_factor: convolution stride for down-sampling/up-sampling.
            model_cls: model to be tested.
        """
        channels = (2, 4, 2)
        @hk.testing.transform_and_run()
        def forward(
            x: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
                t: time.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
                num_timesteps=self.num_timesteps,
            )
            return net(x, t)
        rng = jax.random.PRNGKey(0)
        rng, rng_t = jax.random.split(rng)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        out = forward(dummy_image, dummy_t)
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "Unet3dTime",
            Unet3dTime,
        ),
        ("Unet3dSliceTime", Unet3dSliceTime),
    )
    def test_output_shape_variants(
        self,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape under different device variants.
        Args:
            model_cls: model to be tested.
        """
        kernel_size = 3
        scale_factor = 2
        in_shape = (14, 15, 16)
        channels = (2, 4)
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
                t: time.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
                num_timesteps=self.num_timesteps,
            )
            return net(x, t)
        rng = jax.random.PRNGKey(0)
        rng, rng_t = jax.random.split(rng)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        out = forward(dummy_image, dummy_t)
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
    @parameterized.named_parameters(
        (
            "Unet3dTime",
            Unet3dTime,
        ),
        ("Unet3dSliceTime", Unet3dSliceTime),
    )
    def test_output_real_shape(
        self,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape with real setting.
        Args:
            model_cls: model to be tested.
        """
        kernel_size = 3
        scale_factor = 2
        in_shape = (256, 256, 48)
        channels = (2, 2, 2, 2)
        @hk.testing.transform_and_run()
        def forward(
            x: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
                t: time.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
                num_timesteps=self.num_timesteps,
            )
            return net(x, t)
        rng = jax.random.PRNGKey(0)
        rng, rng_t = jax.random.split(rng)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        out = forward(dummy_image, dummy_t)
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
| 6,305 | 27.278027 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.