repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
ReconVAT | ReconVAT-master/train_baseline_onset_frame_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
# NOTE(review): indentation of this file was lost when it was flattened into
# the dataset dump; code lines below are kept byte-identical.
# Sacred experiment object; config() and train() below hook into it.
ex = Experiment('train_original')
# parameters for the network
# Downsampling kernel/stride — presumably consumed by model code; not used in this script. TODO confirm.
ds_ksize, ds_stride = (2,2),(2,2)
# Spectrogram normalization mode, passed to the model constructors below.
mode = 'imagewise'
sparsity = 1
output_channel = 2
# How often (in epochs) tensorboard_log logs / checkpoints are written.
logging_freq = 100
saving_freq = 1000
# NOTE(review): indentation lost in extraction; the assignments below are the
# body of config(). Sacred captures every local name here and injects it into
# train() by parameter name.
@ex.config
def config():
root = 'runs'
# logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
# Choosing GPU to use
# GPU = '0'
# os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
onset_stack=True
device = 'cuda:0'
log = True
# Local attention window size (forwarded to tensorboard_log).
w_size = 31
model_complexity = 48
spec = 'Mel'
# None -> train from scratch; otherwise an iteration number to resume from.
resume_iteration = None
train_on = 'String'
iteration = 10
# Weight of the VAT loss term.
alpha = 1
VAT=False
# VAT hyper-parameters: probe scale XI and perturbation size eps.
XI= 1e-6
eps=1e-1
VAT_mode = 'all'
# Selects which model class train() instantiates: 'onset_frame', 'frame', 'onset', 'attention'.
model_name = 'onset_frame'
VAT_start = 0
small = True
batch_size = 8
train_batch_size = 8
sequence_length = 327680
# Halve batch and sequence length on GPUs with less than ~10 GB memory.
if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
batch_size //= 2
sequence_length //= 2
print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
epoches = 20000
learning_rate = 5e-4
learning_rate_decay_steps = 10000
learning_rate_decay_rate = 0.98
leave_one_out = None
clip_gradient_norm = 3
validation_length = sequence_length
refresh = False
# Timestamped run directory; also used as the sacred source-code archive.
logdir = f'{root}/baseline_Onset_Frame-'+ datetime.now().strftime('%y%m%d-%H%M%S')
ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
# NOTE(review): indentation lost in extraction; code lines kept byte-identical.
# Entry point: trains an Onsets-and-Frames-style model with optional VAT on a
# string-instrument dataset merged with a small MAPS subset, then evaluates on
# the full MAPS test split. All parameters are injected by sacred from config().
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, model_complexity, VAT_mode, VAT_start,
learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, alpha, model_name, train_batch_size,
clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT, XI, eps, small):
print_config(ex.current_run)
# Labelled/unlabelled/validation splits for the main training dataset.
supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(sequence_length=sequence_length,
validation_length=sequence_length,
refresh=refresh,
device=device,
small=small,
supersmall=False,
dataset=train_on)
# A small ("supersmall") MAPS subset is mixed into both splits below.
MAPS_supervised_set, MAPS_unsupervised_set, MAPS_validation_dataset, _ = prepare_VAT_dataset(
sequence_length=sequence_length,
validation_length=sequence_length,
refresh=refresh,
device=device,
small=small,
supersmall=True,
dataset='MAPS')
supervised_set = ConcatDataset([supervised_set, MAPS_supervised_set])
unsupervised_set = ConcatDataset([unsupervised_set, MAPS_unsupervised_set])
unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True)
supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
# NOTE(review): batch size == len(validation_dataset) loads the whole
# validation set as one batch — memory-heavy for large sets; confirm intended.
valloader = DataLoader(validation_dataset, len(validation_dataset), shuffle=False, drop_last=True)
batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization
if resume_iteration is None:
# Model variant is selected by name; all share the same constructor signature.
if model_name=='onset_frame':
model = OnsetsAndFrames_VAT_full(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
elif model_name=='frame':
model = Frame_stack_VAT(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
elif model_name=='onset':
model = Onset_stack_VAT(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
elif model_name=='attention':
model = Frame_stack_attention_VAT(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
resume_iteration = 0
else: # Loading checkpoints and continue training
trained_dir='trained_MAPS' # Assume that the checkpoint is in this folder
model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
model = torch.load(model_path)
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))
summary(model)
scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)
# loop = tqdm(range(resume_iteration + 1, iterations + 1))
for ep in range(1, epoches+1):
model.train()
predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
loss = sum(losses.values())
# Logging results to tensorboard
if ep == 1:
writer = SummaryWriter(logdir) # create tensorboard logger
# The boolean before VAT_start toggles VAT-specific visualizations.
if ep < VAT_start or VAT==False:
tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
ep, logging_freq, saving_freq, 8, logdir, w_size, writer, False, VAT_start, reconstruction=False)
else:
tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
ep, logging_freq, saving_freq, 8, logdir, w_size, writer, True, VAT_start, reconstruction=False)
# Saving model
if (ep)%saving_freq == 0:
torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
for key, value in {**losses}.items():
writer.add_scalar(key, value.item(), global_step=ep)
# Evaluating model performance on the full MAPS songs in the test split
print('Training finished, now evaluating on the MAPS test split (full songs)')
with torch.no_grad():
model = model.eval()
metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
save_path=os.path.join(logdir,'./MIDI_results'))
for key, values in metrics.items():
if key.startswith('metric/'):
_, category, name = key.split('/')
print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
export_path = os.path.join(logdir, 'result_dict')
# NOTE(review): file handle from open() is never closed; a with-block would be safer.
pickle.dump(metrics, open(export_path, 'wb'))
| 8,271 | 45.47191 | 136 | py |
ReconVAT | ReconVAT-master/train_UNet_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
# NOTE(review): indentation lost in extraction; code lines kept byte-identical.
# Sacred experiment for the UNet VAT training script.
ex = Experiment('train_original')
# parameters for the network
# Downsampling kernel/stride, re-declared locally inside train() as well.
ds_ksize, ds_stride = (2,2),(2,2)
# Spectrogram normalization mode passed to the UNet constructor.
mode = 'imagewise'
sparsity = 1
output_channel = 2
# Epoch intervals for tensorboard logging and checkpointing.
logging_freq = 100
saving_freq = 200
# NOTE(review): indentation lost in extraction; the assignments below form the
# body of config(). Sacred captures these locals and injects them into train().
@ex.config
def config():
root = 'runs'
# logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
# Choosing GPU to use
# GPU = '0'
# os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
onset_stack=True
device = 'cuda:0'
log = True
# Local attention window size.
w_size = 31
spec = 'Mel'
resume_iteration = None
train_on = 'Wind'
n_heads=4
position=True
iteration = 10
VAT_start = 0
# Weight of the VAT loss term.
alpha = 1
VAT=True
# VAT hyper-parameters: probe scale XI and perturbation size eps.
XI= 1e-6
eps=2
small = False
KL_Div = False
reconstruction = False
batch_size = 8
train_batch_size = 1
sequence_length = 327680
# Halve batch and sequence length on GPUs with less than ~10 GB memory.
if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
batch_size //= 2
sequence_length //= 2
print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
epoches = 20000
# step_size_up / max_lr belong to the (commented-out) CyclicLR alternative in train().
step_size_up = 100
max_lr = 1e-4
learning_rate = 1e-3
# base_lr = learning_rate
learning_rate_decay_steps = 1000
learning_rate_decay_rate = 0.98
leave_one_out = None
clip_gradient_norm = 3
validation_length = sequence_length
refresh = False
# Run directory name encodes the main hyper-parameters for easy comparison.
logdir = f'{root}/Unet-recons={reconstruction}-XI={XI}-eps={eps}-alpha={alpha}-train_on=small_{small}_{train_on}-w_size={w_size}-n_heads={n_heads}-lr={learning_rate}-'+ datetime.now().strftime('%y%m%d-%H%M%S')
ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
# NOTE(review): indentation lost in extraction; code lines kept byte-identical.
# Entry point: trains the UNet transcription model with optional VAT, then
# evaluates on the full test split. Parameters injected by sacred from config().
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
reconstruction):
print_config(ex.current_run)
supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
sequence_length=sequence_length,
validation_length=sequence_length,
refresh=refresh,
device=device,
small=small,
supersmall=True,
dataset=train_on)
# MAPS_supervised_set, MAPS_unsupervised_set, MAPS_validation_dataset, _ = prepare_VAT_dataset(
# sequence_length=sequence_length,
# validation_length=sequence_length,
# refresh=False,
# device=device,
# small=True,
# supersmall=True,
# dataset='MAPS')
# supervised_set = ConcatDataset([supervised_set, MAPS_supervised_set])
# unsupervised_set = ConcatDataset([unsupervised_set, MAPS_unsupervised_set])
# The unlabelled loader is only needed when VAT is enabled.
if VAT:
unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True)
# supervised_set, unsupervised_set = torch.utils.data.random_split(dataset, [100, 39],
# generator=torch.Generator().manual_seed(42))
# Cap the validation batch size at 4.
if len(validation_dataset)>4:
val_batch_size=4
else:
val_batch_size = len(validation_dataset)
supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization
# Shadows the module-level constants of the same names.
ds_ksize, ds_stride = (2,2),(2,2)
if resume_iteration is None:
model = UNet(ds_ksize,ds_stride, log=log, reconstruction=reconstruction,
mode=mode, spec=spec, device=device, XI=XI, eps=eps)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
resume_iteration = 0
else: # Loading checkpoints and continue training
trained_dir='trained_MAPS' # Assume that the checkpoint is in this folder
model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
model = torch.load(model_path)
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))
summary(model)
# scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=step_size_up,cycle_momentum=False)
scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)
# loop = tqdm(range(resume_iteration + 1, iterations + 1))
# NOTE(review): f-string with no placeholders — probably a leftover debug print.
print(f'supervised_loader')
for ep in range(1, epoches+1):
if VAT==True:
predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
else:
predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, None,
optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
loss = sum(losses.values())
# Logging results to tensorboard
if ep == 1:
writer = SummaryWriter(logdir) # create tensorboard logger
# The boolean before VAT_start toggles VAT-specific visualizations.
if ep < VAT_start or VAT==False:
tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
False, VAT_start, reconstruction)
else:
tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
True, VAT_start, reconstruction)
# Saving model
if (ep)%saving_freq == 0:
torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
for key, value in {**losses}.items():
writer.add_scalar(key, value.item(), global_step=ep)
# Evaluating model performance on the full MAPS songs in the test split
print('Training finished, now evaluating on the MAPS test split (full songs)')
with torch.no_grad():
model = model.eval()
metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
save_path=os.path.join(logdir,'./MIDI_results'))
for key, values in metrics.items():
if key.startswith('metric/'):
_, category, name = key.split('/')
print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
export_path = os.path.join(logdir, 'result_dict')
# NOTE(review): file handle from open() is never closed; a with-block would be safer.
pickle.dump(metrics, open(export_path, 'wb'))
| 8,580 | 43.926702 | 213 | py |
ReconVAT | ReconVAT-master/model/self_attention_VAT.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
from itertools import cycle
def create_triangular_cycle(start, end, period):
    """Build an endless triangular-wave iterator of scalar tensors.

    Ramps from ``start`` up to ``end`` over ``period`` points, then back
    down again (the first and last points of the descending ramp are
    dropped, since they duplicate the peak and the trough of the ascent),
    and cycles over that waveform forever.
    """
    ascending = torch.linspace(start, end, period)
    descending = torch.linspace(end, start, period)[1:-1]
    waveform = torch.cat((ascending, descending))
    return cycle(waveform)
# NOTE(review): indentation lost in extraction; code lines kept byte-identical.
# (The class name misspells "Multi" but cannot be renamed without breaking callers.)
class MutliHeadAttention1D(nn.Module):
def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
"""Multi-head local (windowed) self-attention over a 1D sequence.

Args:
    in_features: size of each input feature vector.
    out_features: size of each output feature vector; must be divisible by ``groups``.
    kernel_size: 1D local attention window size; must be odd.
    stride: stride of the sliding window.
    groups: number of attention heads.
    position: add a learned relative position encoding to the keys.
    bias: use bias in the Q/K/V projections.
"""
super().__init__()
self.out_features = out_features
self.kernel_size = kernel_size
self.stride = stride
self.position = position
# Symmetric padding so every timestep gets a full window of keys/values.
self.padding = (kernel_size-1)//2
self.groups = groups
# Make sure the feature dim is divisible by the n_heads
assert self.out_features % self.groups == 0, f"out_channels should be divided by groups. (example: out_channels: 40, groups: 4). Now out_channels={self.out_features}, groups={self.groups}"
assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"
if self.position:
# Learned relative position encoding, one vector per window offset.
self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)
# Q/K/V projections. Input shape = (batch, len, feat); Linear acts on the
# last (feature) dimension only.
self.W_k = nn.Linear(in_features, out_features, bias=bias)
self.W_q = nn.Linear(in_features, out_features, bias=bias)
self.W_v = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def forward(self, x):
"""Return (output, attention) for input of shape (batch, seq_len, feat)."""
batch, seq_len, feat_dim = x.size()
# Pad the time axis so each position has a full local window.
padded_x = F.pad(x, [0, 0, self.padding, self.padding])
q_out = self.W_q(x)
k_out = self.W_k(padded_x)
v_out = self.W_v(padded_x)
# unfold extracts one local window per timestep.
k_out = k_out.unfold(1, self.kernel_size, self.stride)
# (batch, L, feature, local_window)
v_out = v_out.unfold(1, self.kernel_size, self.stride)
# (batch, L, feature, local_window)
if self.position:
# Add the relative position encoding to the keys (broadcast over batch and L).
k_out = k_out + self.rel
# Split the feature dimension into heads.
k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
# (batch, L, n_heads, feature_per_head, local_window)
# expand the last dimension s.t. it can multiple with the local att window
q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
# (batch, L, n_heads, feature_per_head, 1)
# Dot-product energy per window position; softmax over the window.
energy = (q_out * k_out).sum(-2, keepdim=True)
attention = F.softmax(energy, dim=-1)
# (batch, L, n_heads, 1, local_window)
out = attention*v_out
# out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
# Sum over the window and merge heads back into one feature dimension.
return out.sum(-1).flatten(2), attention.squeeze(3)
def reset_parameters(self):
"""Kaiming-normal init for the projections; standard normal for the position encoding."""
init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')
if self.position:
init.normal_(self.rel, 0, 1)
# NOTE(review): indentation lost in extraction; code lines kept byte-identical.
class stepwise_VAT(nn.Module):
"""Virtual Adversarial Training (VAT) regularizer.

Estimates, by power iteration, a small input perturbation that changes the
model's prediction the most, then returns the divergence between the
prediction on the clean and on the perturbed input (the LDS loss).
"""
def __init__(self, XI, epsilon, n_power, KL_Div, binwise=False):
# XI: probe perturbation scale used during power iteration.
# epsilon: scale of the final adversarial perturbation.
# n_power: number of power-iteration steps.
# KL_Div: use binary KL divergence instead of BCE as the distance.
# binwise: normalize the perturbation element-wise rather than by L2 norm.
super().__init__()
self.n_power = n_power
self.XI = XI
self.epsilon = epsilon
self.KL_Div = KL_Div
self.binwise = binwise
def forward(self, model, x):
# Reference prediction serves as a soft label; no gradient needed.
with torch.no_grad():
y_ref, _ = model(x) # This will be used as a label, therefore no need grad()
# generate_virtual_adversarial_perturbation
d = torch.randn_like(x, requires_grad=True) # Need gradient
# NOTE(review): after d = d.grad.detach() below, d no longer requires grad,
# so a second power iteration would find d.grad == None — only n_power == 1
# is used in this file; confirm before increasing n_power.
for _ in range(self.n_power):
r = self.XI * _l2_normalize(d, binwise=self.binwise)
x_adv = (x + r).clamp(0,1)
y_pred, _ = model(x_adv)
if self.KL_Div==True:
loss = binary_kl_div(y_pred, y_ref)
else:
loss =F.binary_cross_entropy(y_pred, y_ref)
loss.backward() # Calculate gradient wrt d
d = d.grad.detach()
model.zero_grad() # prevent gradient change in the model
# generating virtual labels and calculate VAT
r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
# logit_p = logit.detach()
x_adv = (x + r_adv).clamp(0,1)
y_pred, _ = model(x_adv)
if self.KL_Div==True:
vat_loss = binary_kl_div(y_pred, y_ref)
else:
vat_loss = F.binary_cross_entropy(y_pred, y_ref)
# Returns (LDS loss, adversarial perturbation, normalized gradient direction).
return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise) # already averaged
class UNet_VAT(nn.Module):
    """Virtual Adversarial Training (VAT) regularizer for the UNet model.

    Uses power iteration to find a small perturbation ``r_adv`` of the input
    spectrogram that maximally changes the transcriber's prediction, then
    returns the divergence between predictions on the clean and the perturbed
    input (local distributional smoothness, LDS).
    """

    def __init__(self, XI, epsilon, n_power, KL_Div, reconstruction=False):
        """
        Args:
            XI: scale of the probe perturbation used during power iteration.
            epsilon: scale of the final adversarial perturbation.
            n_power: number of power-iteration steps.
            KL_Div: use binary KL divergence instead of BCE as the distance.
            reconstruction: stored for interface compatibility; the
                reconstruction branch below is currently commented out.
        """
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.KL_Div = KL_Div
        self.binwise = False
        self.reconstruction = reconstruction

    def forward(self, model, x):
        # Reference prediction acts as a soft label; no gradient needed.
        with torch.no_grad():
            y_ref, _ = model.transcriber(x)  # This will be used as a label, therefore no need grad()
        # if self.reconstruction:
        #     pianoroll, _ = model.transcriber(x)
        #     reconstruction, _ = self.reconstructor(pianoroll)
        #     pianoroll2_ref, _ = self.transcriber(reconstruction)

        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True)  # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            x_adv = (x + r).clamp(0, 1)
            y_pred, _ = model.transcriber(x_adv)
            if self.KL_Div == True:
                loss = binary_kl_div(y_pred, y_ref)
            else:
                loss = F.binary_cross_entropy(y_pred, y_ref)
            loss.backward()  # Calculate gradient wrt d
            # Rescale the raw gradient to avoid underflow before normalization.
            d = d.grad.detach() * 1e10
            model.zero_grad()  # prevent gradient change in the model

        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any() == False, f"r_adv has nan, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        # BUGFIX: the second sanity check previously re-tested isnan while its
        # message claimed "has inf"; it now actually tests for infinities.
        assert torch.isinf(r_adv).any() == False, f"r_adv has inf, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"

        x_adv = (x + r_adv).clamp(0, 1)
        y_pred, _ = model.transcriber(x_adv)
        if self.KL_Div == True:
            vat_loss = binary_kl_div(y_pred, y_ref)
        else:
            vat_loss = F.binary_cross_entropy(y_pred, y_ref)
        # Returns (LDS loss, adversarial perturbation, normalized gradient direction).
        return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise)  # already averaged
# NOTE(review): indentation lost in extraction; code lines kept byte-identical.
class onset_frame_VAT(nn.Module):
"""VAT regularizer for models whose forward returns three values
(e.g. onset, activation, frame); only the first output is used as the
prediction for the smoothness loss.
"""
def __init__(self, XI, epsilon, n_power):
# XI: probe perturbation scale; epsilon: final perturbation scale;
# n_power: number of power-iteration steps.
super().__init__()
self.n_power = n_power
self.XI = XI
self.epsilon = epsilon
def forward(self, model, x):
# Reference prediction serves as a soft label; no gradient needed.
with torch.no_grad():
y_ref, _, _ = model(x) # This will be used as a label, therefore no need grad()
# generate_virtual_adversarial_perturbation
d = torch.randn_like(x, requires_grad=True) # Need gradient
# NOTE(review): as in stepwise_VAT, d.grad is only populated on the first
# iteration; confirm before using n_power > 1.
for _ in range(self.n_power):
r = self.XI * _l2_normalize(d, binwise=False)
x_adv = (x + r).clamp(0,1)
y_pred, _, _ = model(x_adv)
dist =F.binary_cross_entropy(y_pred, y_ref)
dist.backward() # Calculate gradient wrt d
d = d.grad.detach()
model.zero_grad() # prevent gradient change in the model
# generating virtual labels and calculate VAT
r_adv = self.epsilon * _l2_normalize(d, binwise=False)
# logit_p = logit.detach()
x_adv = (x + r_adv).clamp(0,1)
y_pred, _, _ = model(x_adv)
# print(f'x_adv max = {x_adv.max()}\tx_adv min = {x_adv.min()}')
vat_loss = F.binary_cross_entropy(y_pred, y_ref)
# Note: unlike stepwise_VAT, this returns only (loss, perturbation).
return vat_loss, r_adv # already averaged
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
def binary_kl_div(y_pred, y_ref):
    """KL divergence between two maps of Bernoulli probabilities.

    Each element of ``y_pred`` / ``y_ref`` is treated as p(on); the
    complementary probability is stacked alongside so that ``F.kl_div``
    sees a proper two-class distribution per element.
    """
    # Keep probabilities strictly inside (0, 1) so log() stays finite.
    y_pred = torch.clamp(y_pred, 1e-4, 0.9999)  # prevent inf in kl_div
    y_ref = torch.clamp(y_ref, 1e-4, 0.9999)
    pred_dist = torch.stack((y_pred, 1 - y_pred), -1)
    ref_dist = torch.stack((y_ref, 1 - y_ref), -1)
    log_ref = ref_dist.log()
    assert torch.isnan(log_ref).any() == False, "r_adv exploded, please debug tune down the XI for VAT"
    assert torch.isinf(log_ref).any() == False, "r_adv vanished, please debug tune up the XI for VAT"
    return F.kl_div(log_ref, pred_dist, reduction='batchmean')
# NOTE(review): indentation lost in extraction; code lines kept byte-identical.
class VAT_self_attention_1D(nn.Module):
"""Mel-spectrogram frontend + 1D local self-attention transcriber,
trained with an optional stepwise VAT smoothness loss.
"""
def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, XI=1e-5, eps=1e-2,
eps_period=False, eps_max=1, KL_Div=False):
super().__init__()
self.w_size=w_size
self.log = log
self.normalize = Normalization(mode)
self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
trainable_mel=False, trainable_STFT=False)
self.sequence_model = MutliHeadAttention1D(in_features=input_features,
out_features=model_complexity,
kernel_size=w_size,
position=position,
groups=n_heads)
self.layer_norm = nn.LayerNorm(model_complexity)
self.linear = nn.Linear(model_complexity, output_features)
# Single power-iteration VAT with L2 (not binwise) normalization.
self.vat_loss = stepwise_VAT(XI, eps, 1, KL_Div, False)
self.eps_period = eps_period
if self.eps_period:
# Triangular schedule for the perturbation size between eps and eps_max.
self.triangular_cycle = create_triangular_cycle(eps,eps_max,eps_period)
def forward(self, spec):
"""Return (frame_pred, attention) for a (batch, time, bins) spectrogram."""
x, a = self.sequence_model(spec)
x = self.layer_norm(x)
x = self.linear(x)
frame_pred = torch.sigmoid(x)
return frame_pred, a
def run_on_batch(self, batch_l, batch_ul=None, VAT=False):
"""Compute predictions and losses for one labelled batch, plus the VAT
loss on an optional unlabelled batch."""
audio_label = batch_l['audio']
onset_label = batch_l['onset']
frame_label = batch_l['frame']
if batch_ul:
# Unlabelled branch: spectrogram -> (optional log) -> normalize -> VAT loss.
audio_label_ul = batch_ul['audio']
spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
if self.log:
spec = torch.log(spec + 1e-5)
spec = self.normalize.transform(spec)
spec = spec.transpose(-1,-2)
lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
else:
lds_ul = torch.tensor(0.)
r_norm_ul = torch.tensor(0.)
spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
if self.log:
spec = torch.log(spec + 1e-5)
# print(f'spec shape = {spec.shape}')
spec = self.normalize.transform(spec)
spec = spec.transpose(-1,-2) # swap spec bins with timesteps so that it fits LSTM later # shape (8,640,229)
if VAT:
lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
else:
r_adv = None
lds_l = torch.tensor(0.)
r_norm_l = torch.tensor(0.)
frame_pred, a = self(spec)
if self.training:
if self.eps_period:
# NOTE(review): stepwise_VAT stores the perturbation size as `epsilon`,
# not `eps` — this assignment creates an unused attribute, so the
# triangular schedule appears to have no effect; confirm and fix.
self.vat_loss.eps = next(self.triangular_cycle)
print(f'eps = {self.vat_loss.eps}')
predictions = {
'onset': frame_pred.reshape(*frame_label.shape),
'frame': frame_pred.reshape(*frame_label.shape),
'attention': a,
'r_adv': r_adv,
}
losses = {
'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
'loss/train_LDS_l': lds_l,
'loss/train_LDS_ul': lds_ul,
'loss/train_r_norm_l': r_norm_l.abs().mean(),
'loss/train_r_norm_ul': r_norm_ul.abs().mean()
}
else:
predictions = {
'onset': frame_pred.reshape(*frame_label.shape),
'frame': frame_pred.reshape(*frame_label.shape),
'attention': a,
'r_adv': r_adv,
}
losses = {
'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
'loss/test_LDS_l': lds_l,
'loss/test_r_norm_l': r_norm_l.abs().mean()
}
return predictions, losses, spec
def feed_audio(self, audio):
"""Run inference on raw audio (no labels required)."""
# velocity_label = batch['velocity']
spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
if self.log:
spec = torch.log(spec + 1e-5)
# print(f'spec shape = {spec.shape}')
spec = self.normalize.transform(spec)
spec = spec.transpose(-1,-2) # swap spec bins with timesteps so that it fits LSTM later # shape (8,640,229)
# NOTE(review): forward() returns 2 values (frame_pred, attention) but 4 are
# unpacked here — this line will raise ValueError at runtime; confirm and fix.
onset_pred, activation_pred, frame_pred, a = self(spec)
predictions = {
'onset': onset_pred,
# 'offset': offset_pred.reshape(*offset_label.shape),
'activation': activation_pred,
'frame': frame_pred,
'attention': a
# 'velocity': velocity_pred.reshape(*velocity_label.shape)
}
return predictions, spec
def load_my_state_dict(self, state_dict):
"""Copy matching parameters from ``state_dict``, silently skipping extras."""
own_state = self.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
class ConvStack(nn.Module):
    """Convolutional acoustic front-end.

    Maps a (batch, 1, frames, freq_bins) spectrogram to per-frame feature
    vectors of size ``output_features``: three conv layers with two 2x
    frequency poolings, then a fully connected projection per frame.
    """

    def __init__(self, output_features):
        super().__init__()
        ch_small = output_features // 16
        ch_large = output_features // 8
        self.cnn = nn.Sequential(
            # layer 0
            nn.Conv2d(1, ch_small, (3, 3), padding=1),
            nn.BatchNorm2d(ch_small),
            nn.ReLU(),
            # layer 1
            nn.Conv2d(ch_small, ch_small, (3, 3), padding=1),
            nn.BatchNorm2d(ch_small),
            nn.ReLU(),
            # layer 2: halve the frequency axis, widen channels
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
            nn.Conv2d(ch_small, ch_large, (3, 3), padding=1),
            nn.BatchNorm2d(ch_large),
            nn.ReLU(),
            # layer 3: halve the frequency axis again
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
        )
        # Probe the conv stack with a dummy input to size the linear layer.
        flat_freq = self._get_conv_output()
        self.fc = nn.Sequential(
            nn.Linear(flat_freq, output_features),
            nn.Dropout(0.5),
        )

    def forward(self, spec):
        features = self.cnn(spec)
        # (batch, ch, time, freq) -> (batch, time, ch * freq)
        features = features.transpose(1, 2).flatten(-2)
        return self.fc(features)

    def _get_conv_output(self):
        # Dummy (1, 1, 640, 229) spectrogram; only the output size is used.
        probe = torch.rand(1, 1, 640, 229)
        out = self._forward_features(probe)
        return out.transpose(1, 2).flatten(-2).size(-1)

    def _forward_features(self, x):
        return self.cnn(x)
class Timbral_CNN(nn.Module):
    """Compact CNN feature extractor with configurable channel widths.

    Maps a (batch, 1, frames, freq_bins) spectrogram to per-frame feature
    vectors of size ``output_features``; three conv layers (``start_channel``
    then ``final_channel`` wide) with two 2x frequency poolings, followed by
    a per-frame linear projection.
    """

    def __init__(self, start_channel, final_channel, output_features):
        super().__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(1, start_channel, (3, 3), padding=1),
            nn.BatchNorm2d(start_channel),
            nn.ReLU(),
            nn.Conv2d(start_channel, start_channel, (3, 3), padding=1),
            nn.BatchNorm2d(start_channel),
            nn.ReLU(),
            nn.MaxPool2d((1, 2)),  # halve the frequency axis
            nn.Conv2d(start_channel, final_channel, (3, 3), padding=1),
            nn.BatchNorm2d(final_channel),
            nn.ReLU(),
            nn.MaxPool2d((1, 2)),  # halve the frequency axis again
        )
        # Probe the conv stack with a dummy input to size the linear layer.
        flat_freq = self._get_conv_output()
        self.fc = nn.Sequential(
            nn.Linear(flat_freq, output_features),
        )

    def forward(self, spec):
        features = self.cnn(spec)
        # (batch, ch, time, freq) -> (batch, time, ch * freq)
        features = features.transpose(1, 2).flatten(-2)
        return self.fc(features)

    def _get_conv_output(self):
        # Dummy (1, 1, 640, 229) spectrogram; only the output size is used.
        probe = torch.rand(1, 1, 640, 229)
        out = self._forward_features(probe)
        return out.transpose(1, 2).flatten(-2).size(-1)

    def _forward_features(self, x):
        return self.cnn(x)
class VAT_CNN_attention_1D(nn.Module):
    """CNN front-end + 1D local self-attention transcriber trained with VAT.

    ``run_on_batch`` adds a stepwise-VAT smoothness loss on labelled and/or
    unlabelled batches; the perturbation size follows a triangular schedule.
    """

    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, XI=1e-5, eps=1e-2, version='a'):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        # Two alternative CNN feature extractors.
        if version == 'a':
            self.cnn = ConvStack(output_features)
        elif version == 'b':
            # input is batch_size * 1 channel * frames * input_features
            self.cnn = Timbral_CNN(32, 8, output_features)
        self.sequence_model = MutliHeadAttention1D(in_features=output_features,
                                                   out_features=model_complexity,
                                                   kernel_size=w_size,
                                                   position=position,
                                                   groups=n_heads)
        self.layer_norm = nn.LayerNorm(model_complexity)
        self.linear = nn.Linear(model_complexity, output_features)
        # Single power-iteration VAT, BCE distance, L2 (not binwise) normalization.
        self.vat_loss = stepwise_VAT(XI, eps, 1, False)
        # Triangular schedule for the adversarial perturbation size.
        self.triangular_cycle = create_triangular_cycle(1e-2, 10, 50)

    def forward(self, spec):
        """Return (frame_pred, attention) for a (batch, time, bins) spectrogram."""
        x = self.cnn(spec.unsqueeze(1))
        x, a = self.sequence_model(x)
        x = self.layer_norm(x)
        x = self.linear(x)
        frame_pred = torch.sigmoid(x)
        return frame_pred, a

    def run_on_batch(self, batch_l, batch_ul=None, VAT=False):
        """Compute predictions and losses for one labelled batch.

        Args:
            batch_l: labelled batch with 'audio', 'onset', 'frame' tensors.
            batch_ul: optional unlabelled batch used only for the VAT loss.
            VAT: also apply the VAT loss on the labelled batch.
        """
        audio_label = batch_l['audio']
        onset_label = batch_l['onset']
        frame_label = batch_l['frame']
        if batch_ul:
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1])
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1, -2)
            # BUGFIX: stepwise_VAT.forward returns three values
            # (loss, r_adv, normalized gradient); the old two-way unpack
            # raised ValueError here.
            lds_ul, _, _ = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)  # float tensor for consistency with lds_l
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps -> (batch, time, bins)
        spec = spec.transpose(-1, -2)
        if VAT:
            # BUGFIX: three-way unpack (see above).
            lds_l, r_adv, _ = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
        frame_pred, a = self(spec)
        # BUGFIX: stepwise_VAT stores the perturbation size as `epsilon`,
        # not `eps` — assigning `.eps` silently disabled the schedule.
        self.vat_loss.epsilon = next(self.triangular_cycle)
        print(f'VAT eps={self.vat_loss.epsilon}')
        predictions = {
            'onset': frame_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
            'attention': a,
            'r_adv': r_adv,
        }
        if self.training:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
            }
        else:
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        return predictions, losses, spec

    def feed_audio(self, audio):
        """Run inference on raw audio; returns (predictions, spec).

        BUGFIX: forward() returns two values (frame_pred, attention); the old
        four-way unpack always raised ValueError. The prediction dict keeps
        its previous keys, with 'activation' aliased to the frame prediction.
        """
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)  # (batch, time, bins)
        frame_pred, a = self(spec)
        predictions = {
            'onset': frame_pred,
            'activation': frame_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Copy matching parameters from ``state_dict``, silently skipping extras."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class VAT_CNN_attention_onset_frame(nn.Module):
    """Onset + frame transcription model with VAT regularization.

    Two Timbral_CNN front ends feed an onset head and a frame head; the frame
    head attends over the concatenation of the onset probabilities and its own
    CNN activations.  ``vat_loss`` adds a virtual-adversarial smoothness term
    for semi-supervised training.
    """
    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, XI=1e-5, eps=1e-2):
        super().__init__()
        self.w_size = w_size
        self.log = log  # whether to log-compress the spectrogram
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        start_channel = 48
        final_channel = 96
        # input is batch_size * 1 channel * frames * input_features
        self.cnn = Timbral_CNN(start_channel, final_channel, output_features)
        self.onset_timbral_cnn = Timbral_CNN(start_channel, final_channel, output_features)
        # NOTE(review): return value unused; the call is kept only as a shape
        # sanity check of the CNN front end.
        freq_features = self._get_conv_output()
        self.onset_attention = MutliHeadAttention1D(in_features=output_features,
                                                    out_features=model_complexity,
                                                    kernel_size=w_size,
                                                    position=position,
                                                    groups=n_heads)
        self.layer_norm_onset = nn.LayerNorm(model_complexity)
        self.onset_classifier = nn.Linear(model_complexity, output_features)
        # Frame head attends over [onset probabilities ; frame CNN activations].
        self.final_attention = MutliHeadAttention1D(in_features=2*output_features,
                                                    out_features=model_complexity,
                                                    kernel_size=w_size,
                                                    position=position,
                                                    groups=n_heads)
        self.layer_norm_final = nn.LayerNorm(model_complexity)
        self.final_classifier = nn.Linear(model_complexity, output_features)
        self.vat_loss = onset_frame_VAT(XI, eps, 1)
    def _get_conv_output(self):
        """Return the CNN's output feature size for a dummy Mel input."""
        shape = (1, 640, 229)
        bs = 1
        input = torch.rand(bs, *shape)
        output_feat = self._forward_features(input)
        return output_feat.size(-1)
    def _forward_features(self, x):
        x = self.cnn(x)
        return x
    def forward(self, spec):
        """Return (frame_pred, onset_pred, attention) for a (batch, time, freq) spec."""
        onset_pred = self.onset_timbral_cnn(spec.unsqueeze(1))
        onset_pred, _ = self.onset_attention(onset_pred)
        onset_pred = self.layer_norm_onset(onset_pred)
        onset_pred = self.onset_classifier(onset_pred)
        onset_pred = torch.sigmoid(onset_pred)
        activation = self.cnn(spec.unsqueeze(1))
        # Condition the frame prediction on the onset probabilities.
        x, a = self.final_attention(torch.cat((onset_pred, activation), dim=-1))
        x = self.layer_norm_final(x)
        x = self.final_classifier(x)
        frame_pred = torch.sigmoid(x)
        return frame_pred, onset_pred, a
    def run_on_batch(self, batch_l, batch_ul=None, VAT=False):
        """Run one supervised (and optionally semi-supervised VAT) step.

        Returns (predictions, losses, spec) where the dicts are keyed by
        tensorboard-style names and spec is the labelled input spectrogram.
        """
        audio_label = batch_l['audio']
        onset_label = batch_l['onset']
        frame_label = batch_l['frame']
        if batch_ul:
            # Unlabelled VAT smoothness term.
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1])
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1, -2)
            lds_ul, _ = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)  # (batch, time, freq) for the attention layers
        if VAT:
            lds_l, r_adv = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
        frame_pred, onset_pred, a = self(spec)
        if self.training:
            predictions = {
                # NOTE(review): reshape to the prediction's own shape is a
                # no-op; BCE below relies on onset_pred matching onset_label.
                'onset': onset_pred.reshape(*onset_pred.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv
            }
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul
            }
        else:
            predictions = {
                'onset': onset_pred.reshape(*onset_pred.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv
            }
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/test_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
            }
        return predictions, losses, spec
    def feed_audio(self, audio):
        """Transcribe raw audio (inference helper, no labels needed)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)  # (batch, time, freq)
        # forward() returns (frame_pred, onset_pred, attention); the previous
        # 4-way unpack raised a ValueError on every call.
        frame_pred, onset_pred, a = self(spec)
        predictions = {
            'onset': onset_pred,
            # 'activation' kept for key compatibility; aliases the frame head.
            'activation': frame_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec
    def load_my_state_dict(self, state_dict):
        """Copy matching entries of ``state_dict``; skip keys absent from this model."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
batchNorm_momentum = 0.1  # momentum shared by every BatchNorm layer in the U-net blocks below
num_instruments = 1  # output channels of the final decoder block (a single piano-roll/spectrogram plane)
class block(nn.Module):
    """U-net encoder block: two conv+BN+LeakyReLU layers with a 1x1 skip
    connection, followed by a strided conv that downsamples."""
    def __init__(self, inp, out, ksize, pad, ds_ksize, ds_stride):
        super(block, self).__init__()
        self.conv1 = nn.Conv2d(inp,out, kernel_size=ksize, padding=pad)
        self.bn1 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, padding=pad)
        self.bn2 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
        # 1x1 conv so the residual shortcut matches the new channel count.
        self.skip = nn.Conv2d(inp, out, kernel_size=1, padding=0)
        # Strided conv performs the downsampling (instead of pooling).
        self.ds = nn.Conv2d(out, out, kernel_size=ds_ksize, stride=ds_stride, padding=0)
    def forward(self, x):
        x11 = F.leaky_relu(self.bn1(self.conv1(x)))
        x12 = F.leaky_relu(self.bn2(self.conv2(x11)))
        x12 += self.skip(x)  # residual shortcut (in-place add)
        xp = self.ds(x12)
        # Returns the downsampled tensor twice (second slot kept for API
        # symmetry with pooling-index variants) plus the pre-downsample size,
        # which the decoder uses as ConvTranspose2d output_size.
        return xp, xp, x12.size()
class d_block(nn.Module):
    """U-net decoder block: transposed-conv upsampling followed by two
    transposed convs; skip features are concatenated except in the last block."""
    def __init__(self, inp, out, isLast, ksize, pad, ds_ksize, ds_stride):
        super(d_block, self).__init__()
        self.conv2d = nn.ConvTranspose2d(inp, int(inp/2), kernel_size=ksize, padding=pad)
        self.bn2d = nn.BatchNorm2d(int(inp/2), momentum= batchNorm_momentum)
        self.conv1d = nn.ConvTranspose2d(int(inp/2), out, kernel_size=ksize, padding=pad)
        if not isLast:
            self.bn1d = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
            # Non-last blocks receive `inp-out` channels and gain `out` more
            # via the concatenated skip connection in forward().
            self.us = nn.ConvTranspose2d(inp-out, inp-out, kernel_size=ds_ksize, stride=ds_stride)
        else:
            # Last block: no skip concat, no final BN/activation (raw logits out).
            self.us = nn.ConvTranspose2d(inp, inp, kernel_size=ds_ksize, stride=ds_stride)
    def forward(self, x, size=None, isLast=None, skip=None):
        # print(f'x.shape={x.shape}')
        # print(f'target shape = {size}')
        # `size` is the encoder's recorded pre-downsample size, used to
        # resolve the transposed conv's output shape exactly.
        x = self.us(x,output_size=size)
        if not isLast: x = torch.cat((x, skip), 1)
        x = F.leaky_relu(self.bn2d(self.conv2d(x)))
        if isLast: x = self.conv1d(x)
        else: x = F.leaky_relu(self.bn1d(self.conv1d(x)))
        return x
class Encoder(nn.Module):
    """U-net encoder: four downsampling blocks (1->16->32->64->128 channels)
    plus per-level convs that prepare the skip features for the decoder."""
    def __init__(self,ds_ksize, ds_stride):
        super(Encoder, self).__init__()
        self.block1 = block(1,16,(3,3),(1,1),ds_ksize, ds_stride)
        self.block2 = block(16,32,(3,3),(1,1),ds_ksize, ds_stride)
        self.block3 = block(32,64,(3,3),(1,1),ds_ksize, ds_stride)
        self.block4 = block(64,128,(3,3),(1,1),ds_ksize, ds_stride)
        # Extra convs applied to intermediate activations to build skip features.
        self.conv1 = nn.Conv2d(64,64, kernel_size=(3,3), padding=(1,1))
        self.conv2 = nn.Conv2d(32,32, kernel_size=(3,3), padding=(1,1))
        self.conv3 = nn.Conv2d(16,16, kernel_size=(3,3), padding=(1,1))
    def forward(self, x):
        """Return (bottleneck, [sizes per level], [skip features per level])."""
        x1,idx1,s1 = self.block1(x)
        x2,idx2,s2 = self.block2(x1)
        x3,idx3,s3 = self.block3(x2)
        x4,idx4,s4 = self.block4(x3)
        c1=self.conv1(x3)
        c2=self.conv2(x2)
        c3=self.conv3(x1)
        return x4,[s1,s2,s3,s4],[c1,c2,c3,x1]
class Decoder(nn.Module):
    """U-net decoder: four upsampling d_blocks mirroring Encoder; input channel
    counts (192/96/48) account for the concatenated skip features."""
    def __init__(self,ds_ksize, ds_stride):
        super(Decoder, self).__init__()
        self.d_block1 = d_block(192,64,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block2 = d_block(96,32,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block3 = d_block(48,16,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block4 = d_block(16,num_instruments,True,(3,3),(1,1),ds_ksize, ds_stride)
    # NOTE(review): mutable default list is safe here since it is never mutated.
    def forward(self, x, s, c=[None,None,None,None]):
        """Upsample bottleneck `x` using encoder sizes `s` and skip features `c`."""
        x = self.d_block1(x,s[3],False,c[0])
        x = self.d_block2(x,s[2],False,c[1])
        x = self.d_block3(x,s[1],False,c[2])
        x = self.d_block4(x,s[0],True,c[3])
        # reconsturction = torch.sigmoid(self.d_block4(x,s[0],True,c[3]))
        # return torch.sigmoid(x) # This is required to boost the accuracy
        # Output is unbounded (the sigmoid was deliberately removed).
        return x # This is required to boost the accuracy
class Spec2Roll(nn.Module):
    """Spectrogram -> piano-roll transcriber: U-net followed by local
    multi-head attention and a linear 88-key classifier."""
    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet1_decoder = Decoder(ds_ksize, ds_stride)
        # Attention replaces the LSTM of the original architecture.
        self.lstm1 = MutliHeadAttention1D(N_BINS, N_BINS*complexity, 31, position=True, groups=complexity)
        # self.lstm1 = nn.LSTM(N_BINS, N_BINS, batch_first=True, bidirectional=True)
        self.linear1 = nn.Linear(N_BINS*complexity, 88)
    def forward(self, x):
        """Return (pianoroll, attention) for a (batch, 1, time, freq) spectrogram."""
        # U-net 1
        x,s,c = self.Unet1_encoder(x)
        x = self.Unet1_decoder(x,s,c)
        x, a = self.lstm1(x.squeeze(1)) # remove the channel dim
        pianoroll = torch.sigmoid(self.linear1(x)) # Use the full LSTM output
        return pianoroll, a
class Roll2Spec(nn.Module):
    """Piano-roll -> spectrogram reconstructor: attention + linear projection
    up to N_BINS, then a second U-net."""
    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet2_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet2_decoder = Decoder(ds_ksize, ds_stride)
        # self.lstm2 = nn.LSTM(88, N_BINS, batch_first=True, bidirectional=True)
        self.lstm2 = MutliHeadAttention1D(88, N_BINS*complexity, 31, position=True, groups=4)
        self.linear2 = nn.Linear(N_BINS*complexity, N_BINS)
    def forward(self, x):
        """Return (reconstruction, attention) for a (batch, time, 88) roll.

        The reconstruction is the raw (unbounded) decoder output.
        """
        # U-net 2
        x, a = self.lstm2(x)
        x= torch.sigmoid(self.linear2(x)) # ToDo, remove the sigmoid activation and see if we get a better result
        x,s,c = self.Unet2_encoder(x.unsqueeze(1))
        reconstruction = self.Unet2_decoder(x,s,c) # predict roll
        # x,s,c = self.Unet2_encoder(x.unsqueeze(1))
        # x = self.Unet2_decoder(x,s,c) # predict roll
        # x, a = self.lstm2(x.squeeze(1))
        # reconstruction = self.linear2(x) # ToDo, remove the sigmoid activation and see if we get a better result
        # reconstruction = reconstruction.clamp(0,1).unsqueeze(1)
        return reconstruction, a
class Reconstructor(nn.Module):
    """Standalone roll -> spectrogram reconstruction model (U-net 2 only)."""
    def __init__(self, ds_ksize, ds_stride):
        super().__init__()
        self.reconstructor = Roll2Spec(ds_ksize, ds_stride)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.normalize = Normalization('imagewise')
    def forward(self, x):
        """Return (reconstruction, attention) for a piano-roll batch."""
        reconstruction, a = self.reconstructor(x)
        return reconstruction, a
    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Compute the reconstruction loss for one batch.

        ``batch_ul`` and ``VAT`` are accepted for interface parity with the
        other models but are unused here.

        Returns:
            (predictions, losses, spec): reconstruction/attention dict, the
            loss dict, and the target spectrogram without its channel dim.
        """
        audio_label = batch['audio']
        frame_label = batch['frame']
        # Build the normalized log-Mel spectrogram target.
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2).unsqueeze(1)  # (batch, 1, time, n_mels)
        reconstrut, a = self(frame_label)
        predictions = {
            'attention': a,
            'reconstruction': reconstrut,
        }
        # MSE instead of BCE: the decoder output is unbounded (its sigmoid was
        # removed), so binary_cross_entropy would raise on values outside
        # [0, 1].  MSE also matches the reconstruction loss used by UNet.
        losses = {
            'loss/train_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
        }
        return predictions, losses, spec.squeeze(1)
class UNet(nn.Module):
    """Two-stage U-net transcription model (ReconVAT).

    ``transcriber`` (Spec2Roll) maps a spectrogram to a piano roll; when
    ``reconstruction`` is enabled, ``reconstructor`` (Roll2Spec) maps the roll
    back to a spectrogram which is transcribed again, and both rolls are
    trained against the labels.  ``vat_loss`` adds a virtual-adversarial
    smoothness term for semi-supervised training.
    """
    def __init__(self, ds_ksize, ds_stride, log=True, reconstruction=True, mode='imagewise', spec='CQT', device='cpu', XI=1e-6, eps=1e-2):
        super().__init__()
        global N_BINS # using the N_BINS parameter from constant.py
        # Selecting the type of spectrogram to use
        if spec == 'CQT':
            r=2
            N_BINS = 88*r
            self.spectrogram = Spectrogram.CQT1992v2(sr=SAMPLE_RATE, hop_length=HOP_LENGTH,
                                                     n_bins=N_BINS, fmin=27.5,
                                                     bins_per_octave=12*r, trainable=False)
        elif spec == 'Mel':
            self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                          hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                          trainable_mel=False, trainable_STFT=False)
        elif spec == 'CFP':
            self.spectrogram = Spectrogram.CFP(fs=SAMPLE_RATE,
                                               fr=4,
                                               window_size=WINDOW_LENGTH,
                                               hop_length=HOP_LENGTH,
                                               fc=MEL_FMIN,
                                               tc=1/MEL_FMAX)
            N_BINS = self.spectrogram.quef2logfreq_matrix.shape[0]
        else:
            # NOTE(review): falls through without creating self.spectrogram —
            # any later forward pass will fail with AttributeError.
            print(f'Please select a correct spectrogram')
        self.log = log
        self.normalize = Normalization(mode)
        self.reconstruction = reconstruction
        self.vat_loss = UNet_VAT(XI, eps, 1, False)
        # self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        # self.Unet1_decoder = Decoder(ds_ksize, ds_stride)
        # self.lstm1 = MutliHeadAttention1D(N_BINS, N_BINS*4, 31, position=True, groups=4)
        # # self.lstm1 = nn.LSTM(N_BINS, N_BINS, batch_first=True, bidirectional=True)
        # self.linear1 = nn.Linear(N_BINS*4, 88)
        self.transcriber = Spec2Roll(ds_ksize, ds_stride)
        if reconstruction==True:
            # self.Unet2_encoder = Encoder(ds_ksize, ds_stride)
            # self.Unet2_decoder = Decoder(ds_ksize, ds_stride)
            # # self.lstm2 = nn.LSTM(88, N_BINS, batch_first=True, bidirectional=True)
            # self.lstm2 = MutliHeadAttention1D(88, N_BINS*4, 31, position=True, groups=4)
            # self.linear2 = nn.Linear(N_BINS*4, N_BINS)
            self.reconstructor = Roll2Spec(ds_ksize, ds_stride)
    def forward(self, x):
        """Transcribe a (batch, 1, time, freq) spectrogram.

        Returns (reconstruction, pianoroll, pianoroll2, attention) when
        reconstruction is enabled, otherwise (pianoroll, attention).
        """
        # U-net 1
        pianoroll, a = self.transcriber(x)
        if self.reconstruction:
            # U-net 2
            reconstruction, a_reconstruct = self.reconstructor(pianoroll)
            # Applying U-net 1 to the reconstructed spectrograms
            pianoroll2, a_2 = self.transcriber(reconstruction)
            # # U-net2
            # x, h = self.lstm2(pianoroll)
            # feat2= torch.sigmoid(self.linear2(x)) # ToDo, remove the sigmoid activation and see if we get a better result
            # x,s,c = self.Unet2_encoder(feat2.unsqueeze(1))
            # reconstruction = self.Unet2_decoder(x,s,c) # predict roll
            # # Applying U-net 1 to the reconstructed spectrograms
            # x,s,c = self.Unet1_encoder(reconstruction)
            # feat1b = self.Unet1_decoder(x,s,c)
            # x, h = self.lstm1(feat1b.squeeze(1)) # remove the channel dim
            # pianoroll2 = torch.sigmoid(self.linear1(x)) # Use the full LSTM output
            return reconstruction, pianoroll, pianoroll2, a
        else:
            return pianoroll, a
    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Run one supervised (and optionally semi-supervised VAT) step.

        Args:
            batch: labelled batch with 'audio', 'onset', 'frame' tensors.
            batch_ul: optional unlabelled batch ('audio' only) for the
                unlabelled VAT term.
            VAT: when True, also compute VAT on the labelled batch.

        Returns:
            (predictions, losses, spec) — dicts keyed by tensorboard-style
            names plus the labelled spectrogram without its channel dim.
        """
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        if batch_ul:
            # Unlabelled VAT smoothness term.
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2).unsqueeze(1)
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1) # remove the channel dimension
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)
        if self.reconstruction:
            reconstrut, pianoroll, pianoroll2, a = self(spec)
            if self.training:
                predictions = {
                    # No dedicated onset head: the frame roll doubles as onset.
                    'onset': pianoroll,
                    'frame': pianoroll,
                    'frame2':pianoroll2,
                    'onset2':pianoroll2,
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    'loss/train_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/train_LDS_l': lds_l,
                    'loss/train_LDS_ul': lds_ul,
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    'onset': pianoroll.reshape(*frame_label.shape),
                    'frame': pianoroll.reshape(*frame_label.shape),
                    'frame2':pianoroll2.reshape(*frame_label.shape),
                    'onset2':pianoroll2.reshape(*frame_label.shape),
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    'loss/test_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/test_LDS_l': lds_l,
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }
            return predictions, losses, spec.squeeze(1)
        else:
            frame_pred, a = self(spec)
            if self.training:
                predictions = {
                    'onset': frame_pred,
                    'frame': frame_pred,
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_LDS_l': lds_l,
                    'loss/train_LDS_ul': lds_ul,
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    'onset': frame_pred.reshape(*frame_label.shape),
                    'frame': frame_pred.reshape(*frame_label.shape),
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_LDS_l': lds_l,
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }
            return predictions, losses, spec.squeeze(1)
    def run_on_batch_application(self, batch, batch_ul=None, VAT=False):
        """Like run_on_batch, but also adds a consistency loss that pulls the
        unlabelled second-pass roll towards the unlabelled first-pass roll.

        NOTE(review): this method assumes ``batch_ul`` is provided and that
        ``self.reconstruction`` is True — with ``batch_ul`` falsy, ``spec`` is
        used before assignment in the unlabelled forward below.  Confirm with
        callers.
        """
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        if batch_ul:
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2).unsqueeze(1)
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)
        # Forward pass on the unlabelled spectrogram for the consistency term.
        reconstrut, ul_pianoroll, ul_pianoroll2, a = self(spec)
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1) # remove the channel dimension
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)
        reconstrut, pianoroll, pianoroll2, a = self(spec)
        if self.training:
            predictions = {
                'onset': pianoroll,
                'frame': pianoroll,
                'frame2':pianoroll2,
                'onset2':pianoroll2,
                'ul_frame': ul_pianoroll,
                'ul_frame2': ul_pianoroll2,
                'attention': a,
                'r_adv': r_adv,
                'reconstruction': reconstrut,
            }
            losses = {
                'loss/train_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/train_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                # 'loss/ul_consistency_wrt2': F.binary_cross_entropy(predictions['ul_frame'].squeeze(1), predictions['ul_frame2'].squeeze(1).detach()),
                # Pull the unlabelled second-pass roll towards the (detached)
                # first-pass roll.
                'loss/ul_consistency_wrt1': F.binary_cross_entropy(predictions['ul_frame2'].squeeze(1), predictions['ul_frame'].squeeze(1).detach()),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
                'loss/train_r_norm_l': r_norm_l.abs().mean(),
                'loss/train_r_norm_ul': r_norm_ul.abs().mean()
            }
        else:
            predictions = {
                'onset': pianoroll.reshape(*frame_label.shape),
                'frame': pianoroll.reshape(*frame_label.shape),
                'frame2':pianoroll2.reshape(*frame_label.shape),
                'onset2':pianoroll2.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv,
                'reconstruction': reconstrut,
            }
            losses = {
                'loss/test_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/test_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                'loss/test_LDS_l': lds_l,
                'loss/test_r_norm_l': r_norm_l.abs().mean()
            }
        return predictions, losses, spec.squeeze(1)
    def transcribe(self, batch):
        """Inference helper: return {'onset', 'frame'} rolls for a raw-audio batch."""
        audio_label = batch['audio']
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        reconstrut, pianoroll, pianoroll2, a = self(spec)
        predictions = {
            'onset': pianoroll,
            'frame': pianoroll,
        }
        return predictions
    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
# ---- ReconVAT-master/model/VAT.py ----
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class stepwise_VAT(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer.

    Finds, via power iteration, the perturbation direction that most changes
    the model's output, then penalizes the output change under that
    perturbation (local distributional smoothness).
    """
    def __init__(self, XI, epsilon, n_power):
        super().__init__()
        self.n_power = n_power  # power-iteration steps for the adversarial direction
        self.XI = XI  # probe perturbation magnitude during power iteration
        self.epsilon = epsilon  # final adversarial perturbation magnitude
    def forward(self, model, x):
        """Return (vat_loss, r_adv) for input batch x."""
        with torch.no_grad():
            y_ref, _ = model(x) # This will be used as a label, therefore no need grad()
        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        # NOTE(review): after the first iteration d is re-bound to a detached
        # tensor, so d.grad would be None with n_power > 1 — confirm n_power==1.
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d)
            y_pred, _ = model(x + r)
            dist =F.binary_cross_entropy(y_pred, y_ref)
            dist.backward() # Calculate gradient wrt d
            d = d.grad.detach()
            model.zero_grad() # prevent gradient change in the model
        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d)
        # logit_p = logit.detach()
        y_pred, _ = model(x + r_adv)
        vat_loss = F.binary_cross_entropy(y_pred, y_ref)
        return vat_loss, r_adv # already averaged
def _l2_normalize(d):
d = d/torch.norm(d, dim=2, keepdim=True)
return d | 1,478 | 32.613636 | 88 | py |
# ---- ReconVAT-master/model/Unet_prestack.py ----
import torch
from torch.nn.functional import conv1d, mse_loss
import torch.nn.functional as F
import torch.nn as nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
batchNorm_momentum = 0.1  # momentum shared by every BatchNorm layer in the U-net blocks below
num_instruments = 1  # output channels of the final decoder block (a single plane)
class block(nn.Module):
    """U-net encoder block (duplicate of the one in Unet.py): two conv+BN+
    LeakyReLU layers with a 1x1 skip, then a strided downsampling conv."""
    def __init__(self, inp, out, ksize, pad, ds_ksize, ds_stride):
        super(block, self).__init__()
        self.conv1 = nn.Conv2d(inp,out, kernel_size=ksize, padding=pad)
        self.bn1 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, padding=pad)
        self.bn2 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
        # 1x1 conv so the residual shortcut matches the new channel count.
        self.skip = nn.Conv2d(inp, out, kernel_size=1, padding=0)
        # Strided conv performs the downsampling (instead of pooling).
        self.ds = nn.Conv2d(out, out, kernel_size=ds_ksize, stride=ds_stride, padding=0)
    def forward(self, x):
        x11 = F.leaky_relu(self.bn1(self.conv1(x)))
        x12 = F.leaky_relu(self.bn2(self.conv2(x11)))
        x12 += self.skip(x)  # residual shortcut (in-place add)
        xp = self.ds(x12)
        # Downsampled tensor returned twice (API symmetry) plus the
        # pre-downsample size, used by the decoder as output_size.
        return xp, xp, x12.size()
class d_block(nn.Module):
    """U-net decoder block (duplicate): transposed-conv upsampling, optional
    skip concat, and two transposed convs; last block outputs raw logits."""
    def __init__(self, inp, out, isLast, ksize, pad, ds_ksize, ds_stride):
        super(d_block, self).__init__()
        self.conv2d = nn.ConvTranspose2d(inp, int(inp/2), kernel_size=ksize, padding=pad)
        self.bn2d = nn.BatchNorm2d(int(inp/2), momentum= batchNorm_momentum)
        self.conv1d = nn.ConvTranspose2d(int(inp/2), out, kernel_size=ksize, padding=pad)
        if not isLast:
            self.bn1d = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
            # Non-last blocks receive `inp-out` channels and gain `out` more
            # via the concatenated skip connection in forward().
            self.us = nn.ConvTranspose2d(inp-out, inp-out, kernel_size=ds_ksize, stride=ds_stride)
        else:
            self.us = nn.ConvTranspose2d(inp, inp, kernel_size=ds_ksize, stride=ds_stride)
    def forward(self, x, size=None, isLast=None, skip=None):
        # print(f'x.shape={x.shape}')
        # print(f'target shape = {size}')
        # `size` is the encoder's recorded pre-downsample size.
        x = self.us(x,output_size=size)
        if not isLast: x = torch.cat((x, skip), 1)
        x = F.leaky_relu(self.bn2d(self.conv2d(x)))
        if isLast: x = self.conv1d(x)
        else: x = F.leaky_relu(self.bn1d(self.conv1d(x)))
        return x
class Encoder(nn.Module):
    """U-net encoder (duplicate): four downsampling blocks plus per-level
    convs that build the skip features for the decoder."""
    def __init__(self,ds_ksize, ds_stride):
        super(Encoder, self).__init__()
        self.block1 = block(1,16,(3,3),(1,1),ds_ksize, ds_stride)
        self.block2 = block(16,32,(3,3),(1,1),ds_ksize, ds_stride)
        self.block3 = block(32,64,(3,3),(1,1),ds_ksize, ds_stride)
        self.block4 = block(64,128,(3,3),(1,1),ds_ksize, ds_stride)
        # Extra convs applied to intermediate activations for skip features.
        self.conv1 = nn.Conv2d(64,64, kernel_size=(3,3), padding=(1,1))
        self.conv2 = nn.Conv2d(32,32, kernel_size=(3,3), padding=(1,1))
        self.conv3 = nn.Conv2d(16,16, kernel_size=(3,3), padding=(1,1))
    def forward(self, x):
        """Return (bottleneck, [sizes per level], [skip features per level])."""
        x1,idx1,s1 = self.block1(x)
        x2,idx2,s2 = self.block2(x1)
        x3,idx3,s3 = self.block3(x2)
        x4,idx4,s4 = self.block4(x3)
        c1=self.conv1(x3)
        c2=self.conv2(x2)
        c3=self.conv3(x1)
        return x4,[s1,s2,s3,s4],[c1,c2,c3,x1]
class Decoder(nn.Module):
    """U-net decoder (duplicate): four upsampling d_blocks; input channel
    counts (192/96/48) account for the concatenated skip features."""
    def __init__(self,ds_ksize, ds_stride):
        super(Decoder, self).__init__()
        self.d_block1 = d_block(192,64,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block2 = d_block(96,32,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block3 = d_block(48,16,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block4 = d_block(16,num_instruments,True,(3,3),(1,1),ds_ksize, ds_stride)
    # NOTE(review): mutable default list is safe here since it is never mutated.
    def forward(self, x, s, c=[None,None,None,None]):
        """Upsample bottleneck `x` using encoder sizes `s` and skip features `c`."""
        x = self.d_block1(x,s[3],False,c[0])
        x = self.d_block2(x,s[2],False,c[1])
        x = self.d_block3(x,s[1],False,c[2])
        x = self.d_block4(x,s[0],True,c[3])
        # reconsturction = torch.sigmoid(self.d_block4(x,s[0],True,c[3]))
        # return torch.sigmoid(x) # This is required to boost the accuracy
        # Output is unbounded (the sigmoid was deliberately removed).
        return x # This is required to boost the accuracy
class Prestack(nn.Module):
    """U-net front end: encode a spectrogram and decode it back through the
    skip connections, keeping the input spatial shape."""
    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet1_decoder = Decoder(ds_ksize, ds_stride)
    def forward(self, x):
        """Run the encoder/decoder pair; returns a tensor shaped like the input."""
        latent, sizes, skips = self.Unet1_encoder(x)
        return self.Unet1_decoder(latent, sizes, skips)
class Prestack_Model(nn.Module):
    """U-net front end followed by a (randomly initialized) torchvision ResNet
    that predicts 88 piano keys from 25-frame spectrogram windows."""
    def __init__(self, model='resnet18'):
        super().__init__()
        unet = Prestack((3,3),(1,1))
        # NOTE(review): torch.hub.load downloads the model definition from the
        # network on first use.
        resnet = torch.hub.load('pytorch/vision:v0.9.0', model, pretrained=False)
        # Adapt the stock ResNet to 1-channel input and an 88-key output.
        resnet.conv1 = torch.nn.Conv1d(1, 64, (7, 7), (2, 2), (3, 3), bias=False)
        resnet.fc = torch.nn.Linear(512, 88, bias=True)
        self.prestack_model = nn.Sequential(unet, resnet)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.normalize = Normalization('imagewise')
    def forward(self, x):
        return self.prestack_model(x)
    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Run one supervised step; ``batch_ul``/``VAT`` are unused here.

        Returns:
            (predictions, losses, spec): per-window frame probabilities, the
            BCE loss dict, and the input spectrogram.
        """
        audio_label = batch['audio']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        # Convert audio to a normalized log-Mel spectrogram.
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Pad 12 frames on each side, then slide a 25-frame window (stride 1)
        # so every time step gets its own context window.
        spec_padded = torch.nn.functional.pad(spec, (12, 12))  # (batch, 229, 640+24)
        spec_padded = spec_padded.unfold(2, 25, 1)
        spec_padded = spec_padded.transpose(1, 2).reshape(-1, 229, 25)  # windows as a batch
        spec_padded = spec_padded.unsqueeze(1)  # 1 channel for the CNN
        frame_pred = torch.zeros(spec_padded.shape[0], 88).to(spec_padded.device)
        # One window at a time to bound memory use.
        for idx, window in enumerate(spec_padded):
            frame_pred[idx] = self(window.unsqueeze(0)).squeeze(0)
        frame_pred = torch.sigmoid(frame_pred)
        predictions = {
            'onset': frame_pred,
            'frame': frame_pred,
            'r_adv': None
        }
        try:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label.reshape(-1, 88)),
            }
        except RuntimeError:
            # BCE raises when predictions leave [0, 1] (e.g. NaNs).  Log the
            # diagnostics and re-raise: the old bare `except:` swallowed the
            # error and then crashed with NameError on the unbound `losses`.
            print('The prediction contains negative values')
            print(f'frame_pred min = {frame_pred.min()}')
            print(f'frame_pred max = {frame_pred.max()}')
            raise
        return predictions, losses, spec.squeeze(1)
# ---- ReconVAT-master/model/onset_frame_VAT.py ----
"""
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn.functional as F
from torch import nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
from torch.nn.utils import clip_grad_norm_
import torch.nn.init as init
class MutliHeadAttention1D(nn.Module):
def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
"""kernel_size is the 1D local attention window size"""
super().__init__()
self.out_features = out_features
self.kernel_size = kernel_size
self.stride = stride
self.position = position
# Padding should always be (kernel_size-1)/2
# Isn't it?
self.padding = (kernel_size-1)//2
self.groups = groups
# Make sure the feature dim is divisible by the n_heads
assert self.out_features % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"
if self.position:
# Relative position encoding
self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)
# Input shape = (batch, len, feat)
# Increasing the channel deapth (feature dim) with Conv2D
# kernel_size=1 such that it expands only the feature dim
# without affecting other dimensions
self.W_k = nn.Linear(in_features, out_features, bias=bias)
self.W_q = nn.Linear(in_features, out_features, bias=bias)
self.W_v = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, seq_len, feat_dim = x.size()
padded_x = F.pad(x, [0, 0, self.padding, self.padding])
q_out = self.W_q(x)
k_out = self.W_k(padded_x)
v_out = self.W_v(padded_x)
k_out = k_out.unfold(1, self.kernel_size, self.stride)
# (batch, L, feature, local_window)
v_out = v_out.unfold(1, self.kernel_size, self.stride)
# (batch, L, feature, local_window)
if self.position:
k_out = k_out + self.rel # relative position?
k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
# (batch, L, n_heads, feature_per_head, local_window)
# expand the last dimension s.t. it can multiple with the local att window
q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
# (batch, L, n_heads, feature_per_head, 1)
energy = (q_out * k_out).sum(-2, keepdim=True)
attention = F.softmax(energy, dim=-1)
# (batch, L, n_heads, 1, local_window)
out = attention*v_out
# out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
# (batch, c, H, W)
return out.sum(-1).flatten(2), attention.squeeze(3)
def reset_parameters(self):
    """Kaiming-initialize the three projections; standard-normal for the
    relative positional embedding (when enabled).

    The projections are initialized in (K, V, Q) order, matching the
    original call sequence so random draws are reproducible.
    """
    for projection in (self.W_k, self.W_v, self.W_q):
        init.kaiming_normal_(projection.weight, mode='fan_out', nonlinearity='relu')
    if self.position:
        init.normal_(self.rel, 0, 1)
# class stepwise_VAT(nn.Module):
# """
# We define a function of regularization, specifically VAT.
# """
# def __init__(self, XI, epsilon, n_power, VAT_mode):
# super().__init__()
# self.n_power = n_power
# self.XI = XI
# self.epsilon = epsilon
# self.VAT_mode = VAT_mode
# def forward(self, model, x):
# with torch.no_grad():
# onset_ref, activation_ref, frame_ref = model(x) # This will be used as a label, therefore no need grad()
# # generate_virtual_adversarial_perturbation
# d = torch.randn_like(x, requires_grad=True) # Need gradient
# for _ in range(self.n_power):
# r = self.XI * _l2_normalize(d)
# onset_pred, activation_pred, frame_pred = model(x + r)
# dist_onset =F.binary_cross_entropy(onset_pred, onset_ref)
# dist_activation =F.binary_cross_entropy(activation_pred, activation_ref)
# dist_frame =F.binary_cross_entropy(frame_pred, frame_ref)
# if self.VAT_mode == 'onset':
# dist = dist_onset
# elif self.VAT_mode == 'activation':
# dist = dist_activation
# elif self.VAT_mode == 'frame':
# dist = dist_frame
# elif self.VAT_mode == 'all':
# dist = dist_frame + dist_activation + dist_onset
# dist.backward() # Calculate gradient wrt d
# d = d.grad.detach()
# model.zero_grad() # prevent gradient change in the model
# # generating virtual labels and calculate VAT
# r_adv = self.epsilon * _l2_normalize(d)
# onset_pred, activation_pred, frame_pred = model(x + r_adv)
# vat_onset =F.binary_cross_entropy(onset_pred, onset_ref)
# vat_activation =F.binary_cross_entropy(activation_pred, activation_ref)
# vat_frame =F.binary_cross_entropy(frame_pred, frame_ref)
# if self.VAT_mode == 'onset':
# vat_loss = vat_onset
# elif self.VAT_mode == 'activation':
# vat_loss = vat_activation
# elif self.VAT_mode == 'frame':
# vat_loss = vat_frame
# elif self.VAT_mode == 'all':
# vat_loss = vat_frame + vat_activation + vat_onset
# return vat_loss, r_adv # already averaged
def binary_kl_div(y_pred, y_ref):
    """KL divergence between two per-element Bernoulli distributions.

    Each scalar probability is expanded into a two-class distribution
    (p, 1-p) and compared with F.kl_div (input must be log-probabilities).

    Args:
        y_pred: predicted probabilities in [0, 1].
        y_ref: reference probabilities in [0, 1].

    Returns:
        Scalar tensor, 'batchmean'-reduced KL divergence.
    """
    # Clamp BOTH bounds away from 0/1: the original lower bound of 0 still
    # produced -inf from p.log() (and thus inf loss) whenever a reference
    # probability was exactly 0 or 1, defeating the stated purpose.
    y_pred = torch.clamp(y_pred, 1e-4, 0.9999)
    y_ref = torch.clamp(y_ref, 1e-4, 0.9999)
    q = torch.stack((y_pred, 1 - y_pred), -1)
    p = torch.stack((y_ref, 1 - y_ref), -1)
    return F.kl_div(p.log(), q, reduction='batchmean')
class stepwise_VAT(nn.Module):
    """Virtual Adversarial Training (VAT) loss on the frame output.

    A random probe direction is refined by power iteration into the
    direction that most perturbs the model's frame predictions; the loss
    (local distributional smoothness, LDS) is the divergence between the
    predictions on the clean and the adversarially perturbed input.

    Args:
        XI: scale of the probe step used during power iteration.
        epsilon: magnitude of the final adversarial perturbation.
        n_power: number of power-iteration steps.
        KL_Div: if True, measure divergence with binary KL; otherwise with
            binary cross-entropy.
    """
    def __init__(self, XI, epsilon, n_power, KL_Div):
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.KL_Div = KL_Div
        # The original set binwise=False on both branches of an if/else on
        # KL_Div; collapsed into a single assignment (same behavior).
        self.binwise = False

    def forward(self, model, x):
        """Return (vat_loss, r_adv, normalized_probe) for the batch x."""
        with torch.no_grad():
            # Reference predictions act as fixed targets; no grad needed.
            onset_ref, activation_ref, frame_ref = model(x)

        # Power iteration: refine a random direction d toward the most
        # loss-sensitive direction.
        # NOTE(review): after the first iteration d is replaced by a
        # detached gradient, so n_power > 1 would fail at loss.backward();
        # the model constructors only ever pass n_power=1 -- confirm before
        # raising it.
        d = torch.randn_like(x, requires_grad=True)
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            x_adv = (x + r).clamp(0, 1)
            onset_pred, activation_pred, frame_pred = model(x_adv)
            if self.KL_Div == True:
                loss = binary_kl_div(frame_pred, frame_ref)
            else:
                loss = F.binary_cross_entropy(frame_pred, frame_ref)
            loss.backward()  # gradient w.r.t. d
            # 1e10 rescaling presumably guards against float underflow
            # before normalization -- TODO confirm.
            d = d.grad.detach() * 1e10
            model.zero_grad()  # keep the model's own gradients untouched

        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any() == False, "r_adv contains nan"
        # Fixed message: this assert checks for inf, not nan.
        assert torch.isinf(r_adv).any() == False, "r_adv contains inf"

        # LDS between predictions on the clean and perturbed input.
        x_adv = (x + r_adv).clamp(0, 1)
        onset_pred, activation_pred, frame_pred = model(x_adv)
        if self.KL_Div == True:
            vat_loss = binary_kl_div(frame_pred, frame_ref)
        else:
            vat_loss = F.binary_cross_entropy(frame_pred, frame_ref)
        return vat_loss, r_adv, _l2_normalize(d * 1e8, binwise=self.binwise)  # already averaged
class stepwise_VAT_frame_stack(nn.Module):
    """
    We define a function of regularization, specifically VAT.

    Variant of VAT for models returning (activation, frame) pairs (no
    onset head).  A random direction is refined by power iteration, and
    the adversarial (LDS) loss plus the perturbation itself are returned.
    """
    def __init__(self, XI, epsilon, n_power, VAT_mode):
        # XI: scale of the probe step used during power iteration.
        # epsilon: magnitude of the final adversarial perturbation.
        # n_power: number of power-iteration steps.
        # VAT_mode: which outputs contribute to the loss
        #           ('activation', 'frame', or 'all').
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.VAT_mode = VAT_mode

    def forward(self, model, x):
        """Return (vat_loss, r_adv) for the input batch x."""
        with torch.no_grad():
            activation_ref, frame_ref = model(x) # This will be used as a label, therefore no need grad()

        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, False)
            x_adv = (x+r).clamp(0,1)
            activation_pred, frame_pred = model(x_adv)
            dist_activation = F.mse_loss(activation_pred, activation_ref)
            dist_frame = F.binary_cross_entropy(frame_pred, frame_ref)
            if self.VAT_mode == 'activation':
                dist = dist_activation
            elif self.VAT_mode == 'frame':
                dist = dist_frame
            elif self.VAT_mode == 'all':
                dist = dist_frame + dist_activation
            dist.backward() # Calculate gradient wrt d
            # 1e20 rescaling presumably guards against float underflow
            # before normalization -- TODO confirm.
            d = d.grad.detach()*1e20
            model.zero_grad() # prevent gradient change in the model

        # generating virtual labels and calculate VAT
        # print(f'dist = {dist}')
        # print(f'd mean = {d.mean()}\std = {d.std()}')
        # print(f'd norm mean = {_l2_normalize(d).mean()}\tstd = {_l2_normalize(d).std()}')
        r_adv = self.epsilon * _l2_normalize(d, False)
        assert torch.isnan(r_adv).any()==False, "r_adv exploded, please debug tune down the XI for VAT"
        assert torch.isinf(r_adv).any()==False, "r_adv vanished, please debug tune up the XI for VAT"
        x_adv = (x+r_adv).clamp(0,1)
        activation_pred, frame_pred = model(x_adv)
        assert torch.isnan(activation_pred).any()==False, "activation_pred is nan, please debug"
        assert torch.isnan(frame_pred).any()==False, "frame_pred is nan, please debug"
        vat_activation =F.mse_loss(activation_pred, activation_ref)
        vat_frame =F.binary_cross_entropy(frame_pred, frame_ref)
        if self.VAT_mode == 'activation':
            vat_loss = vat_activation
        elif self.VAT_mode == 'frame':
            vat_loss = vat_frame
        elif self.VAT_mode == 'all':
            vat_loss = vat_frame + vat_activation
        return vat_loss, r_adv # already averaged
class stepwise_VAT_onset_stack(nn.Module):
    """VAT loss for a model that predicts only onsets.

    Args:
        XI: scale of the probe step used during power iteration.
        epsilon: magnitude of the final adversarial perturbation.
        n_power: number of power-iteration steps.
        VAT_mode: kept for interface parity with the other VAT classes;
            only the onset output exists here.
    """
    def __init__(self, XI, epsilon, n_power, VAT_mode):
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.VAT_mode = VAT_mode

    def forward(self, model, x):
        """Return (vat_loss, r_adv) for the input batch x."""
        with torch.no_grad():
            onset_ref = model(x)  # fixed target; no gradient needed

        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True)  # Need gradient
        for _ in range(self.n_power):
            # BUGFIX: _l2_normalize requires an explicit binwise flag; the
            # original one-argument call raised a TypeError. False matches
            # the other VAT classes in this file.
            r = self.XI * _l2_normalize(d, False)
            onset_pred = model(x + r)
            dist = F.binary_cross_entropy(onset_pred, onset_ref)
            dist.backward()  # Calculate gradient wrt d
            d = d.grad.detach()
            model.zero_grad()  # prevent gradient change in the model

        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, False)
        assert torch.isnan(r_adv).any()==False, "r_adv exploded, please debug tune down the XI for VAT"
        assert torch.isinf(r_adv).any()==False, "r_adv vanished, please debug tune up the XI for VAT"
        onset_pred = model(x + r_adv)
        # BUGFIX: the original asserted on activation_pred/frame_pred,
        # which are never defined in this onset-only class (NameError).
        assert torch.isnan(onset_pred).any()==False, "onset_pred is nan, please debug"
        vat_loss = F.binary_cross_entropy(onset_pred, onset_ref)
        return vat_loss, r_adv  # already averaged
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
class ConvStack(nn.Module):
    """Acoustic front-end: a small CNN over (time, frequency) followed by
    a per-frame fully-connected projection.

    Input is (batch, frames, input_features); output is
    (batch, frames, output_features). The frequency axis is pooled down by
    a factor of 4; the time axis is preserved.
    """
    def __init__(self, input_features, output_features):
        super().__init__()
        narrow = output_features // 16
        wide = output_features // 8
        # input is batch_size * 1 channel * frames * input_features
        layers = [
            # layer 0
            nn.Conv2d(1, narrow, (3, 3), padding=1),
            nn.BatchNorm2d(narrow),
            nn.ReLU(),
            # layer 1
            nn.Conv2d(narrow, narrow, (3, 3), padding=1),
            nn.BatchNorm2d(narrow),
            nn.ReLU(),
            # layer 2: halve the frequency axis, widen the channels
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
            nn.Conv2d(narrow, wide, (3, 3), padding=1),
            nn.BatchNorm2d(wide),
            nn.ReLU(),
            # layer 3: halve the frequency axis again
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
        ]
        self.cnn = nn.Sequential(*layers)
        self.fc = nn.Sequential(
            nn.Linear(wide * (input_features // 4), output_features),
            nn.Dropout(0.5),
        )

    def forward(self, spec):
        # Insert a channel dim: (batch, 1, frames, input_features).
        with_channel = spec.view(spec.size(0), 1, spec.size(1), spec.size(2))
        features = self.cnn(with_channel)
        # Merge channels and pooled frequencies into one per-frame vector.
        per_frame = features.transpose(1, 2).flatten(-2)
        return self.fc(per_frame)
class Onset_Stack(nn.Module):
    """Onset head: ConvStack front-end, an optional recurrent model, then
    a sigmoid-activated linear layer.

    When `sequence_model` is falsy the recurrent stage is skipped; the
    appropriate forward is bound once at construction time.
    """
    def __init__(self, input_features, model_size, output_features, sequence_model):
        super().__init__()
        self.convstack = ConvStack(input_features, model_size)
        self.sequence_model = sequence_model
        # Both paths use the same projection shape; pick the forward once.
        self.linear = nn.Linear(model_size, output_features)
        self.forward = self.forward_LSTM if self.sequence_model else self.forward_noLSTM

    def forward_LSTM(self, x):
        features = self.convstack(x)
        if self.training:
            features, (h, c) = self.sequence_model(features)
        else:
            # NOTE(review): the recurrent model is deliberately run in
            # train mode even during evaluation, then switched back --
            # presumably a workaround; confirm before simplifying.
            self.train()
            features, (h, c) = self.sequence_model(features)
            self.eval()
        return torch.sigmoid(self.linear(features))

    def forward_noLSTM(self, x):
        return torch.sigmoid(self.linear(self.convstack(x)))
class Combine_Stack(nn.Module):
    """Fusion head: optionally run a recurrent model over the combined
    stack outputs, then map to per-key probabilities with a sigmoid.

    Without a sequence model the input is already `output_features` wide,
    so a square linear layer is used instead.
    """
    def __init__(self, model_size, output_features, sequence_model):
        super().__init__()
        self.sequence_model = sequence_model
        if self.sequence_model:
            self.linear = nn.Linear(model_size, output_features)
            self.forward = self.forward_LSTM
        else:
            self.linear = nn.Linear(output_features, output_features)
            self.forward = self.forward_noLSTM

    def forward_LSTM(self, combined):
        if self.training:
            combined, _ = self.sequence_model(combined)
        else:
            # NOTE(review): the recurrent model is deliberately run in
            # train mode even during evaluation, then switched back --
            # presumably a workaround; confirm before simplifying.
            self.train()
            combined, _ = self.sequence_model(combined)
            self.eval()
        return torch.sigmoid(self.linear(combined))

    def forward_noLSTM(self, combined):
        return torch.sigmoid(self.linear(combined))
class Frame_stack_VAT(nn.Module):
    """Frame-only transcription model (no onset stack) with optional VAT.

    Pipeline: waveform -> log-Mel spectrogram -> frame_stack (ConvStack +
    linear + sigmoid) -> Combine_Stack (BiLSTM + linear + sigmoid).

    Args:
        input_features: number of spectrogram bins.
        output_features: number of output pitches.
        model_complexity: width multiplier (model_size = 16x this).
        log: apply log compression to the spectrogram.
        mode: normalization mode passed to Normalization.
        spec: kept for interface parity; only Mel is constructed here.
        XI, eps: VAT probe scale and perturbation magnitude.
        VAT_mode: which outputs the VAT loss uses.
    """
    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', XI=1e-5, eps=10, VAT_mode='all'):
        super().__init__()
        self.log = log
        self.normalize = Normalization(mode)
        # Fixed (non-trainable) Mel front-end.
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        model_size = model_complexity * 16
        sequence_model = lambda input_size, output_size: nn.LSTM(input_size, output_size // 2, batch_first=True, bidirectional=True)
        # sequence_model = lambda input_size, output_size: MutliHeadAttention1D(input_size, output_size, 31, position=True, groups=4)
        self.vat_loss = stepwise_VAT_frame_stack(XI, eps, 1, VAT_mode)
        self.combined_stack = Combine_Stack(model_size, output_features, sequence_model(output_features, model_size))
        # self.combined_stack = Combine_Stack(model_size, output_features, None)
        self.frame_stack = nn.Sequential(
            ConvStack(input_features, model_size),
            nn.Linear(model_size, output_features),
            nn.Sigmoid()
        )

    def forward(self, spec):
        activation_pred = self.frame_stack(spec)
        combined_pred = activation_pred
        frame_pred = self.combined_stack(combined_pred)
        return activation_pred, frame_pred

    def _preprocess(self, audio):
        """Waveform -> normalized log-Mel of shape (batch, time, n_mels)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Swap bins with timesteps so the tensor fits the LSTM later.
        return spec.transpose(-1, -2)

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Run one (optionally semi-supervised) step; return
        (predictions, losses, spec)."""
        audio_label = batch['audio']
        frame_label = batch['frame']
        spec = self._preprocess(audio_label)

        if batch_ul and VAT:
            # BUGFIX: this branch previously transposed the *labeled* spec
            # again instead of spec_ul, and reshaped with the labeled audio
            # length, so VAT ran on the wrong (double-transposed) data.
            spec_ul = self._preprocess(batch_ul['audio'])
            lds_ul, _ = self.vat_loss(self, spec_ul)
        else:
            lds_ul = torch.tensor(0.)

        if VAT:
            lds_l, r_adv = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)

        activation_pred, frame_pred = self(spec)
        predictions = {
            # No onset head: the frame output is reused under the 'onset'
            # key so downstream code has a uniform interface.
            'onset': frame_pred.reshape(*frame_pred.shape),
            # 'offset': offset_pred.reshape(*offset_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
            'r_adv': r_adv
        }
        if self.training:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_LDS': (lds_ul + lds_l) / 2
            }
        else:
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/test_LDS': lds_l
            }
        return predictions, losses, spec

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class Onset_stack_VAT(nn.Module):
    """Onset-only transcription model with optional VAT regularization.

    Pipeline: waveform -> log-Mel spectrogram -> Onset_Stack
    (ConvStack + BiLSTM + linear + sigmoid).

    Args mirror Frame_stack_VAT; `spec` is kept for interface parity.
    """
    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', XI=1e-5, eps=10, VAT_mode='all'):
        super().__init__()
        self.log = log
        self.normalize = Normalization(mode)
        # Fixed (non-trainable) Mel front-end.
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        model_size = model_complexity * 16
        sequence_model = lambda input_size, output_size: nn.LSTM(input_size, output_size // 2, batch_first=True, bidirectional=True)
        self.vat_loss = stepwise_VAT_onset_stack(XI, eps, 1, VAT_mode)
        self.onset_stack = Onset_Stack(input_features, model_size, output_features, sequence_model(model_size, model_size))

    def forward(self, spec):
        onset_pred = self.onset_stack(spec)
        return onset_pred

    def _preprocess(self, audio):
        """Waveform -> normalized log-Mel of shape (batch, time, n_mels)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Swap bins with timesteps so the tensor fits the LSTM later.
        return spec.transpose(-1, -2)

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Run one (optionally semi-supervised) step; return
        (predictions, losses, spec)."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        spec = self._preprocess(audio_label)

        if batch_ul and VAT:
            # BUGFIX: the original overwrote the labeled `spec` with the
            # unlabeled spectrogram here (and mixed spec/spec_ul names), so
            # the supervised loss below was computed on unlabeled audio.
            spec_ul = self._preprocess(batch_ul['audio'])
            lds_ul, _ = self.vat_loss(self, spec_ul)
        else:
            lds_ul = torch.tensor(0.)

        if VAT:
            lds_l, r_adv = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)

        onset_pred = self(spec)
        # Per-cell binary accuracy at a 0.5 threshold.
        accuracy = (onset_label == (onset_pred > 0.5)).float().sum() / onset_label.flatten(0).shape[0]
        predictions = {
            'onset': onset_pred.reshape(*onset_pred.shape),
            'r_adv': r_adv
        }
        if self.training:
            losses = {
                'loss/train_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'metric/train_accuracy': accuracy,
                'loss/train_LDS': torch.mean(torch.stack((lds_ul, lds_l)), dim=0)
            }
        else:
            losses = {
                'loss/test_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'metric/test_accuracy': accuracy,
                'loss/test_LDS': lds_l
            }
        return predictions, losses, spec

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class OnsetsAndFrames_VAT_full(nn.Module):
    """Full Onsets-and-Frames model (onset + frame + combined stacks) with
    VAT on the frame output.

    Args mirror Frame_stack_VAT; `VAT_mode` and `spec` are kept for
    interface parity.
    """
    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', XI=1e-5, eps=10, VAT_mode='all'):
        super().__init__()
        self.log = log
        self.normalize = Normalization(mode)
        # Fixed (non-trainable) Mel front-end.
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        model_size = model_complexity * 16
        sequence_model = lambda input_size, output_size: nn.LSTM(input_size, output_size // 2, batch_first=True, bidirectional=True)
        # Need to rewrite this part, since we are going to modify the LSTM
        self.vat_loss = stepwise_VAT(XI, eps, 1, False)
        self.onset_stack = Onset_Stack(input_features, model_size, output_features, sequence_model(model_size, model_size))
        self.combined_stack = Combine_Stack(model_size, output_features, sequence_model(output_features * 2, model_size))
        self.frame_stack = nn.Sequential(
            ConvStack(input_features, model_size),
            nn.Linear(model_size, output_features),
            nn.Sigmoid()
        )

    def forward(self, spec):
        onset_pred = self.onset_stack(spec)
        # offset_pred = self.offset_stack(mel)
        activation_pred = self.frame_stack(spec)
        # Onsets are detached so frame errors do not backprop into the
        # onset stack.
        combined_pred = torch.cat([onset_pred.detach(), activation_pred], dim=-1)
        frame_pred = self.combined_stack(combined_pred)
        # velocity_pred = self.velocity_stack(mel)
        return onset_pred, activation_pred, frame_pred

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Run one (optionally semi-supervised) step; return
        (predictions, losses, spec)."""
        audio_label = batch['audio']
        # NOTE(review): an nn.Module is always truthy, so this condition
        # always holds -- presumably meant to support an onset-less
        # configuration; confirm before relying on it.
        if self.onset_stack:
            onset_label = batch['onset']
        # offset_label = batch['offset']
        frame_label = batch['frame']
        # velocity_label = batch['velocity']

        if batch_ul:
            audio_label_ul = batch_ul['audio']
            # BUGFIX: reshape with the unlabeled audio's own length
            # (was audio_label.shape[-1]).
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label_ul.shape[-1])[:, :-1])
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1, -2)
            # print(f'run_batch label = {frame_label.shape}')
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)

        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Swap bins with timesteps so the tensor fits the LSTM later.
        spec = spec.transpose(-1, -2)

        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)

        onset_pred, activation_pred, frame_pred = self(spec)
        if self.training:
            predictions = {
                'onset': onset_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'r_adv': r_adv,
            }
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
                'loss/train_r_norm_l': r_norm_l.abs().mean(),
                'loss/train_r_norm_ul': r_norm_ul.abs().mean()
            }
        else:
            predictions = {
                'onset': onset_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'r_adv': r_adv,
            }
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/test_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/test_LDS_l': lds_l,
                'loss/test_r_norm_l': r_norm_l.abs().mean()
            }
        # Removed an unreachable duplicate return statement that followed.
        return predictions, losses, spec

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
| 29,455 | 39.685083 | 150 | py |
ReconVAT | ReconVAT-master/model/constants.py | import torch
# Audio and spectrogram configuration shared across the models.
SAMPLE_RATE = 16000
# 32 ms hop at 16 kHz -> 512 samples per frame.
HOP_LENGTH = SAMPLE_RATE * 32 // 1000
ONSET_LENGTH = SAMPLE_RATE * 32 // 1000
OFFSET_LENGTH = SAMPLE_RATE * 32 // 1000
# Number of frames an onset/offset label spans.
HOPS_IN_ONSET = ONSET_LENGTH // HOP_LENGTH
HOPS_IN_OFFSET = OFFSET_LENGTH // HOP_LENGTH
# MIDI pitch range of an 88-key piano (A0 to C8).
MIN_MIDI = 21
MAX_MIDI = 108
N_BINS = 229 # Default using Mel spectrograms
MEL_FMIN = 30
MEL_FMAX = SAMPLE_RATE // 2
# New parameter for Guqin
# N_BINS = 400 # Default using Mel spectrograms
# MEL_FMIN = 20
# MEL_FMAX = SAMPLE_RATE // 2
WINDOW_LENGTH = 2048
#DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
| 570 | 20.961538 | 64 | py |
ReconVAT | ReconVAT-master/model/Thickstun_model.py | import torch
from torch.nn.functional import conv1d, mse_loss
import torch.nn.functional as F
import torch.nn as nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class Thickstun(torch.nn.Module):
    """CNN transcription baseline: a frequency convolution followed by a
    time convolution over 25-frame windows, then a linear layer mapping to
    88 piano keys per frame.
    """
    def __init__(self):
        super(Thickstun, self).__init__()
        # Creating Layers
        self.normalize = Normalization('imagewise')
        k_out = 128    # channels of the frequency convolution
        k2_out = 4096  # channels of the time convolution
        self.CNN_freq = nn.Conv2d(1, k_out,
                                  kernel_size=(128, 1), stride=(2, 1))
        self.CNN_time = nn.Conv2d(k_out, k2_out,
                                  kernel_size=(1, 25), stride=(1, 1))
        # 51 = frequency positions remaining after the strided freq conv.
        self.linear = torch.nn.Linear(k2_out * 51, 88, bias=False)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)

    def forward(self, x):
        """x: (batch, n_mels, 25) windows -> (batch, 88) key probabilities."""
        z2 = torch.relu(self.CNN_freq(x.unsqueeze(1)))  # Make channel as 1 (N,C,H,W) shape = [10, 128, 193, 25]
        z3 = torch.relu(self.CNN_time(z2))  # shape = [10, 256, 193, 1]
        y = self.linear(torch.relu(torch.flatten(z3, 1)))
        return torch.sigmoid(y)

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Supervised frame loss for one batch.

        batch_ul/VAT are accepted for interface parity with the VAT
        models but are ignored here.
        """
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        # log compression
        spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # Pad 12 frames on both sides, then slide a 25-frame window
        # (stride 1) so every time step gets a centered context window.
        spec_padded = torch.nn.functional.pad(spec, (12, 12))  # (batch, 229, 640+24)
        spec_padded = spec_padded.unfold(2, 25, 1)
        # Windows become the batch dimension for the per-frame CNN.
        spec_padded = spec_padded.transpose(1, 2).reshape(-1, 229, 25)
        frame_pred = self(spec_padded)
        predictions = {
            'onset': frame_pred,  # no onset head: reuse the frame output
            'frame': frame_pred,
            'r_adv': None
        }
        losses = {
            'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label.reshape(-1, 88)),
        }
        return predictions, losses, spec.squeeze(1)
ReconVAT | ReconVAT-master/model/helper_functions.py | import os
from model.dataset import *
from model.evaluate_functions import evaluate_wo_velocity
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.nn.utils import clip_grad_norm_
import numpy as np
# Mac users need to uncomment these two lines
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
from collections import defaultdict
def cycle(iterable):
    """Yield items of `iterable` forever, restarting it after each pass.

    Unlike itertools.cycle, the iterable is re-iterated each round rather
    than cached, so a re-iterable source (e.g. a DataLoader) produces
    fresh batches every epoch.
    """
    while True:
        yield from iterable
def prepare_dataset(train_on, sequence_length, validation_length, leave_one_out, refresh, device, small=False):
    """Build (train, validation, full_validation) datasets for supervised
    training.

    Args:
        train_on: 'MAESTRO', 'MusicNet', or anything else for MAPS.
        sequence_length: excerpt length (in samples) for training.
        validation_length: excerpt length for the validation split.
        leave_one_out: MAESTRO only -- year held out for validation.
        refresh: force dataset cache regeneration where supported.
        device: device the datasets load tensors onto.
        small: accepted for interface parity; unused here.

    Returns:
        (dataset, validation_dataset, full_validation); full_validation
        uses unsegmented recordings (sequence_length=None).
    """
    train_groups, validation_groups = ['train'], ['validation']  # Parameters for MAESTRO
    if leave_one_out is not None:  # It applies only to MAESTRO
        all_years = {'2004', '2006', '2008', '2009', '2011', '2013', '2014', '2015', '2017'}
        train_groups = list(all_years - {str(leave_one_out)})
        validation_groups = [str(leave_one_out)]

    # Choosing the dataset to use
    if train_on == 'MAESTRO':
        dataset = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        validation_dataset = MAESTRO(groups=validation_groups, sequence_length=sequence_length)
        # BUGFIX: full_validation was only assigned in the MAPS branch, so
        # this branch raised NameError at the return statement.
        full_validation = MAESTRO(groups=validation_groups, sequence_length=None)
    elif train_on == 'MusicNet':
        dataset = MusicNet(groups=['train'], sequence_length=sequence_length, device=device, refresh=refresh)
        validation_dataset = MusicNet(groups=['test'], sequence_length=sequence_length, device=device, refresh=refresh)
        full_validation = MusicNet(groups=['test'], sequence_length=None, device=device, refresh=refresh)
    else:
        dataset = MAPS(groups=['AkPnBcht', 'AkPnBsdf', 'AkPnCGdD', 'AkPnStgb', 'SptkBGAm', 'SptkBGCl', 'StbgTGd2'],
                       sequence_length=sequence_length, overlap=False, device=device, refresh=refresh)
        validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'],
                                  sequence_length=validation_length, overlap=True, device=device, refresh=refresh)
        full_validation = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=None, device=device, refresh=refresh)
    return dataset, validation_dataset, full_validation
def prepare_VAT_dataset(sequence_length, validation_length, refresh, device, small=False, supersmall=False, dataset='MAPS'):
    """Build the dataset splits for semi-supervised (VAT) training.

    Args:
        sequence_length: excerpt length (in samples) for training.
        validation_length: excerpt length for the validation split.
        refresh: force dataset cache regeneration where supported.
        device: device the datasets load tensors onto.
        small: MAPS only -- train on a single labeled group.
        supersmall: MAPS only -- forwarded to MAPS when small is True.
        dataset: one of 'MAPS', 'Violin', 'String', 'Wind', 'Flute', 'Guqin'.

    Returns:
        (l_set, ul_set, validation_dataset, full_validation): labeled and
        unlabeled training sets, a segmented validation set, and the same
        validation split with unsegmented recordings (sequence_length=None).

    Raises:
        Exception: if `dataset` is not one of the supported names.
    """
    train_groups, validation_groups = ['train'], ['validation'] # Parameters for MAESTRO
    if dataset=='MAPS':
        # Choosing the dataset to use
        if small==True:
            l_set = MAPS(groups=['AkPnBcht'],
                         sequence_length=sequence_length, overlap=False, device=device,
                         refresh=refresh, supersmall=supersmall)
        else:
            l_set = MAPS(groups=['AkPnBcht', 'AkPnBsdf', 'AkPnCGdD', 'AkPnStgb', 'SptkBGAm', 'SptkBGCl', 'StbgTGd2'],
                         sequence_length=sequence_length, overlap=False, device=device, refresh=refresh)
        # MAESTRO serves as the unlabeled pool for MAPS training.
        ul_set = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=validation_length, overlap=True, device=device, refresh=refresh)
        full_validation = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=None, device=device, refresh=refresh)
    elif dataset=='Violin':
        l_set = MusicNet(groups=['train_violin_l'],
                         sequence_length=sequence_length, device=device)
        ul_set = MusicNet(groups=['train_violin_ul'],
                          sequence_length=sequence_length, device=device)
        # ul_set = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        validation_dataset = MusicNet(groups=['test_violin'], sequence_length=validation_length, device=device)
        full_validation = MusicNet(groups=['test_violin'], sequence_length=None, device=device)
    elif dataset=='String':
        l_set = MusicNet(groups=['train_string_l'],
                         sequence_length=sequence_length, device=device)
        ul_set = MusicNet(groups=['train_string_ul'],
                          sequence_length=sequence_length, device=device)
        # ul_set = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        # NOTE(review): the String split validates on the violin test set --
        # presumably intentional; confirm.
        validation_dataset = MusicNet(groups=['test_violin'], sequence_length=validation_length, device=device)
        full_validation = MusicNet(groups=['test_violin'], sequence_length=None, device=device)
    elif dataset=='Wind':
        l_set = MusicNet(groups=['train_wind_l'],
                         sequence_length=sequence_length, device=device)
        ul_set = MusicNet(groups=['train_wind_ul'],
                          sequence_length=sequence_length, device=device)
        # ul_set = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        validation_dataset = MusicNet(groups=['test_wind'], sequence_length=validation_length, device=device)
        full_validation = MusicNet(groups=['test_wind'], sequence_length=None, device=device)
    elif dataset=='Flute':
        l_set = MusicNet(groups=['train_flute_l'],
                         sequence_length=sequence_length, device=device)
        ul_set = MusicNet(groups=['train_flute_ul'],
                          sequence_length=sequence_length, device=device)
        # ul_set = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        validation_dataset = MusicNet(groups=['test_flute'], sequence_length=validation_length, device=device)
        full_validation = MusicNet(groups=['test_flute'], sequence_length=None, device=device)
    elif dataset=='Guqin':
        l_set = Guqin(groups=['train_l'],
                      sequence_length=sequence_length, device=device, refresh=refresh)
        ul_set = Guqin(groups=['train_ul'],
                       sequence_length=sequence_length, device=device, refresh=refresh)
        validation_dataset = Guqin(groups=['test'], sequence_length=validation_length, device=device, refresh=refresh)
        full_validation = Guqin(groups=['test'], sequence_length=None, device=device, refresh=refresh)
    else:
        raise Exception("Please choose the correct dataset")
    return l_set, ul_set, validation_dataset, full_validation
def tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                    VAT, VAT_start, reconstruction):
    """Log metrics and diagnostic figures to TensorBoard for one epoch.

    Runs the model on a fixed visualization batch, and every `logging_freq`
    epochs (and at epoch 1) evaluates on `validation_dataset` and
    `supervised_loader`, writing scalar metrics plus spectrogram /
    transcription / adversarial-perturbation / attention figures.

    Args:
        batch_visualize: fixed batch used for the image panels.
        model: model exposing run_on_batch(batch, batch_ul, VAT).
        validation_dataset: dataset passed to evaluate_wo_velocity.
        supervised_loader: loader passed to eval_model for test losses.
        ep: current epoch (1-based).
        logging_freq: epochs between metric/figure logging.
        saving_freq: accepted for interface parity; unused here.
        n_heads: number of attention heads to visualize.
        logdir: accepted for interface parity; unused here.
        w_size: attention window size passed to flatten_attention.
        writer: torch.utils.tensorboard SummaryWriter.
        VAT, VAT_start: VAT flags forwarded to run_on_batch / eval_model.
        reconstruction: forwarded to evaluate_wo_velocity.
    """
    model.eval()
    predictions, losses, mel = model.run_on_batch(batch_visualize, None, VAT)
    loss = sum(losses.values())

    if (ep)%logging_freq==0 or ep==1:
        # Full transcription metrics on the validation set.
        with torch.no_grad():
            for key, values in evaluate_wo_velocity(validation_dataset, model, reconstruction=reconstruction, VAT=False).items():
                if key.startswith('metric/'):
                    _, category, name = key.split('/')
                    print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
                    # Only log note/frame precision, recall and f1 scalars.
                    if ('precision' in name or 'recall' in name or 'f1' in name) and 'chroma' not in name:
                        writer.add_scalar(key, np.mean(values), global_step=ep)
                # if key.startswith('loss/'):
                #     writer.add_scalar(key, np.mean(values), global_step=ep)
        model.eval()
        test_losses = eval_model(model, ep, supervised_loader, VAT_start, VAT)
        for key, values in test_losses.items():
            if key.startswith('loss/'):
                writer.add_scalar(key, np.mean(values), global_step=ep)

    if ep==1: # Showing the original transcription and spectrograms
        fig, axs = plt.subplots(2, 2, figsize=(24,8))
        axs = axs.flat
        for idx, i in enumerate(mel.cpu().detach().numpy()):
            axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Original', fig , ep)

        fig, axs = plt.subplots(2, 2, figsize=(24,4))
        axs = axs.flat
        for idx, i in enumerate(batch_visualize['frame'].cpu().numpy()):
            axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Label', fig , ep)

        # Adversarially perturbed spectrograms (if VAT produced r_adv).
        if predictions['r_adv'] is not None:
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)

    if ep%logging_freq == 0:
        # Posteriorgram panels for every available output head.
        for output_key in ['frame', 'onset', 'frame2', 'onset2']:
            if output_key in predictions.keys():
                fig, axs = plt.subplots(2, 2, figsize=(24,4))
                axs = axs.flat
                for idx, i in enumerate(predictions[output_key].detach().cpu().numpy()):
                    axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                    axs[idx].axis('off')
                fig.tight_layout()
                writer.add_figure(f'images/{output_key}', fig , ep)

        # fig, axs = plt.subplots(2, 2, figsize=(24,4))
        # axs = axs.flat
        # for idx, i in enumerate(predictions['frame'].detach().cpu().numpy()):
        #     axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #     axs[idx].axis('off')
        # fig.tight_layout()
        # writer.add_figure('images/Transcription', fig , ep)

        # if 'onset' in predictions.keys():
        #     fig, axs = plt.subplots(2, 2, figsize=(24,4))
        #     axs = axs.flat
        #     for idx, i in enumerate(predictions['onset'].detach().cpu().numpy()):
        #         axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #         axs[idx].axis('off')
        #     fig.tight_layout()
        #     writer.add_figure('images/onset', fig , ep)

        if 'activation' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,4))
            axs = axs.flat
            for idx, i in enumerate(predictions['activation'].detach().cpu().numpy()):
                axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/activation', fig , ep)

        if 'reconstruction' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(predictions['reconstruction'].cpu().detach().numpy().squeeze(1)):
                axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Reconstruction', fig , ep)

        # show adversarial samples
        if predictions['r_adv'] is not None:
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)

        # show attention
        if 'attention' in predictions.keys():
            fig = plt.figure(figsize=(90, 45))
            # Creating the grid for 2 attention head for the transformer
            outer = gridspec.GridSpec(2, 4, wspace=0.2, hspace=0.2)
            fig.suptitle("Visualizing Attention Heads", size=20)
            attentions = predictions['attention']
            for i in range(n_heads):
                # Creating the grid for 4 samples
                inner = gridspec.GridSpecFromSubplotSpec(2, 2,
                        subplot_spec=outer[i], wspace=0.1, hspace=0.1)
                ax = plt.Subplot(fig, outer[i])
                ax.set_title(f'Head {i}', size=20) # This does not show up
                for idx in range(predictions['attention'].shape[0]):
                    axCenter = plt.Subplot(fig, inner[idx])
                    fig.add_subplot(axCenter)
                    attention = attentions[idx, :, i]
                    attention = flatten_attention(attention, w_size)
                    axCenter.imshow(attention.cpu().detach(), cmap='jet')
                    attended_features = mel[idx]
                    # Create another plot on top and left of the attention map
                    divider = make_axes_locatable(axCenter)
                    axvert = divider.append_axes('left', size='30%', pad=0.5)
                    axhoriz = divider.append_axes('top', size='20%', pad=0.25)
                    axhoriz.imshow(attended_features.t().cpu().detach(), aspect='auto', origin='lower', cmap='jet')
                    axvert.imshow(predictions['frame'][idx].cpu().detach(), aspect='auto')
                    # changing axis for the center fig
                    axCenter.set_xticks([])
                    # changing axis for the output fig (left fig)
                    axvert.set_yticks([])
                    axvert.xaxis.tick_top()
                    axvert.set_title('Transcription')
                    axhoriz.set_title(f'Attended Feature (Spec)')
                    axhoriz.margins(x=0)
                    axvert.margins(y=0)
            writer.add_figure('images/Attention', fig , ep)
def tensorboard_log_without_VAT(batch_visualize, model, validation_dataset, supervised_loader,
                                ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                                VAT, VAT_start, reconstruction):
    """Log metrics and diagnostic figures to TensorBoard for a non-VAT run.

    Every `logging_freq` epochs (and at epoch 1) this evaluates the model on
    `validation_dataset`, logs test losses from `supervised_loader`, and renders
    figures (spectrograms, labels, predictions, reconstruction, adversarial
    samples, attention maps) from the fixed `batch_visualize` batch.
    Side effects only: writes to `writer` and prints metric summaries.
    NOTE(review): `saving_freq` and `logdir` are accepted but unused here.
    """
    model.eval()
    predictions, losses, mel = model.run_on_batch(batch_visualize)
    loss = sum(losses.values())
    if (ep)%logging_freq==0 or ep==1:
        with torch.no_grad():
            # Full transcription metrics (precision/recall/f1) on the validation set
            for key, values in evaluate_wo_velocity(validation_dataset, model, reconstruction=reconstruction, VAT=False).items():
                if key.startswith('metric/'):
                    _, category, name = key.split('/')
                    print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
                    # Only scalar-log the headline metrics, skipping chroma variants
                    if ('precision' in name or 'recall' in name or 'f1' in name) and 'chroma' not in name:
                        writer.add_scalar(key, np.mean(values), global_step=ep)
                # if key.startswith('loss/'):
                #     writer.add_scalar(key, np.mean(values), global_step=ep)
        model.eval()
        test_losses = eval_model(model, ep, supervised_loader, VAT_start, VAT)
        for key, values in test_losses.items():
            if key.startswith('loss/'):
                writer.add_scalar(key, np.mean(values), global_step=ep)
    if ep==1: # Showing the original transcription and spectrograms
        fig, axs = plt.subplots(2, 2, figsize=(24,8))
        axs = axs.flat
        for idx, i in enumerate(mel.cpu().detach().numpy()):
            axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Original', fig , ep)
        fig, axs = plt.subplots(2, 2, figsize=(24,4))
        axs = axs.flat
        for idx, i in enumerate(batch_visualize['frame'].cpu().numpy()):
            axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Label', fig , ep)
        if predictions['r_adv'] is not None:
            # Spectrogram plus the adversarial perturbation found by VAT
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)
    if ep%logging_freq == 0:
        for output_key in ['frame']:
            if output_key in predictions.keys():
                fig, axs = plt.subplots(2, 2, figsize=(24,4))
                axs = axs.flat
                # NOTE(review): hard-codes a batch of 4 (2x2 grid) — confirm batch size
                predictions[output_key] = predictions[output_key].reshape(4,-1,88)
                for idx, i in enumerate(predictions[output_key].detach().cpu().numpy()):
                    axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                    axs[idx].axis('off')
                fig.tight_layout()
                writer.add_figure(f'images/{output_key}', fig , ep)
        # fig, axs = plt.subplots(2, 2, figsize=(24,4))
        # axs = axs.flat
        # for idx, i in enumerate(predictions['frame'].detach().cpu().numpy()):
        #     axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #     axs[idx].axis('off')
        # fig.tight_layout()
        # writer.add_figure('images/Transcription', fig , ep)
        # if 'onset' in predictions.keys():
        #     fig, axs = plt.subplots(2, 2, figsize=(24,4))
        #     axs = axs.flat
        #     for idx, i in enumerate(predictions['onset'].detach().cpu().numpy()):
        #         axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #         axs[idx].axis('off')
        #     fig.tight_layout()
        #     writer.add_figure('images/onset', fig , ep)
        if 'activation' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,4))
            axs = axs.flat
            for idx, i in enumerate(predictions['activation'].detach().cpu().numpy()):
                axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/activation', fig , ep)
        if 'reconstruction' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(predictions['reconstruction'].cpu().detach().numpy().squeeze(1)):
                axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Reconstruction', fig , ep)
        # show adversarial samples
        if predictions['r_adv'] is not None:
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)
        # show attention
        if 'attention' in predictions.keys():
            fig = plt.figure(figsize=(90, 45))
            # Creating the grid for 2 attention head for the transformer
            outer = gridspec.GridSpec(2, 4, wspace=0.2, hspace=0.2)
            fig.suptitle("Visualizing Attention Heads", size=20)
            attentions = predictions['attention']
            for i in range(n_heads):
                # Creating the grid for 4 samples
                inner = gridspec.GridSpecFromSubplotSpec(2, 2,
                                subplot_spec=outer[i], wspace=0.1, hspace=0.1)
                ax = plt.Subplot(fig, outer[i])
                ax.set_title(f'Head {i}', size=20) # This does not show up
                for idx in range(predictions['attention'].shape[0]):
                    axCenter = plt.Subplot(fig, inner[idx])
                    fig.add_subplot(axCenter)
                    attention = attentions[idx, :, i]
                    # Expand the windowed attention into a full seq x seq map
                    attention = flatten_attention(attention, w_size)
                    axCenter.imshow(attention.cpu().detach(), cmap='jet')
                    attended_features = mel[idx]
                    # Create another plot on top and left of the attention map
                    divider = make_axes_locatable(axCenter)
                    axvert = divider.append_axes('left', size='30%', pad=0.5)
                    axhoriz = divider.append_axes('top', size='20%', pad=0.25)
                    axhoriz.imshow(attended_features.t().cpu().detach(), aspect='auto', origin='lower', cmap='jet')
                    axvert.imshow(predictions['frame'][idx].cpu().detach(), aspect='auto')
                    # changing axis for the center fig
                    axCenter.set_xticks([])
                    # changing axis for the output fig (left fig)
                    axvert.set_yticks([])
                    axvert.xaxis.tick_top()
                    axvert.set_title('Transcription')
                    axhoriz.set_title(f'Attended Feature (Spec)')
                    axhoriz.margins(x=0)
                    axvert.margins(y=0)
            writer.add_figure('images/Attention', fig , ep)
def tensorboard_log_transcriber(batch_visualize, model, validation_dataset, supervised_loader,
                                ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                                VAT, VAT_start, reconstruction):
    """Log losses and diagnostic figures to TensorBoard for the transcriber model.

    Lighter variant of the VAT logger: no transcription-metric evaluation,
    only test losses plus figures (original spectrograms, labels,
    reconstruction, attention) rendered from `batch_visualize`.
    Side effects only: writes to `writer` and prints loss summaries.
    NOTE(review): `validation_dataset`, `saving_freq`, `logdir` and
    `reconstruction` are accepted but unused here.
    """
    model.eval()
    predictions, losses, mel = model.run_on_batch(batch_visualize, None, VAT)
    loss = sum(losses.values())
    if (ep)%logging_freq==0 or ep==1:
        model.eval()
        test_losses = eval_model(model, ep, supervised_loader, VAT_start, VAT)
        for key, values in test_losses.items():
            if key.startswith('loss/'):
                writer.add_scalar(key, np.mean(values), global_step=ep)
    if ep==1: # Showing the original transcription and spectrograms
        fig, axs = plt.subplots(2, 2, figsize=(24,8))
        axs = axs.flat
        for idx, i in enumerate(mel.cpu().detach().numpy()):
            axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Original', fig , ep)
        fig, axs = plt.subplots(2, 2, figsize=(24,4))
        axs = axs.flat
        for idx, i in enumerate(batch_visualize['frame'].cpu().numpy()):
            axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Label', fig , ep)
    if ep%logging_freq == 0:
        if 'reconstruction' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(predictions['reconstruction'].cpu().detach().numpy().squeeze(1)):
                axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Reconstruction', fig , ep)
        # show attention
        if 'attention' in predictions.keys():
            fig = plt.figure(figsize=(90, 45))
            # Creating the grid for 2 attention head for the transformer
            outer = gridspec.GridSpec(2, 4, wspace=0.2, hspace=0.2)
            fig.suptitle("Visualizing Attention Heads", size=20)
            attentions = predictions['attention']
            for i in range(n_heads):
                # Creating the grid for 4 samples
                inner = gridspec.GridSpecFromSubplotSpec(2, 2,
                                subplot_spec=outer[i], wspace=0.1, hspace=0.1)
                ax = plt.Subplot(fig, outer[i])
                ax.set_title(f'Head {i}', size=20) # This does not show up
                # NOTE(review): hard-codes 4 samples (2x2 grid) — confirm batch size
                for idx in range(4):
                    axCenter = plt.Subplot(fig, inner[idx])
                    fig.add_subplot(axCenter)
                    attention = attentions[idx, :, i]
                    attention = flatten_attention(attention, w_size)
                    axCenter.imshow(attention.cpu().detach(), cmap='jet')
                    attended_features = mel[idx]
                    # Create another plot on top and left of the attention map
                    divider = make_axes_locatable(axCenter)
                    axvert = divider.append_axes('left', size='30%', pad=0.5)
                    axhoriz = divider.append_axes('top', size='20%', pad=0.25)
                    axhoriz.imshow(attended_features.t().cpu().detach(), aspect='auto', origin='lower', cmap='jet')
                    axvert.imshow(batch_visualize['frame'][idx].cpu().detach(), aspect='auto')
                    # changing axis for the center fig
                    axCenter.set_xticks([])
                    # changing axis for the output fig (left fig)
                    axvert.set_yticks([])
                    axvert.xaxis.tick_top()
                    axvert.set_title('Transcription')
                    axhoriz.set_title(f'Attended Feature (Spec)')
                    axhoriz.margins(x=0)
                    axvert.margins(y=0)
            writer.add_figure('images/Attention', fig , ep)
def flatten_attention(a, w_size=31):
    """Expand windowed attention rows into a full (seq_len, seq_len) map.

    `a` holds, for each time step, attention weights over a local window of
    `w_size` neighbouring steps; the result places each row's weights at its
    absolute positions in a zero-padded square matrix (for visualization).
    """
    half = (w_size - 1) // 2  # half window size on each side of t
    seq_len = a.shape[0]
    full = torch.zeros(seq_len, seq_len)
    for t in range(seq_len):
        start = max(t - half, 0)
        end = min(t + half, seq_len)
        width = end - start
        if t < half:
            # near the left edge only the tail of the window is valid
            full[t, start:end + 1] = a[t, -(width + 1):]
        else:
            full[t, start:end] = a[t, :width]
    return full
def train_model(model, ep, loader, optimizer, scheduler, clip_gradient_norm):
    """Run one supervised training epoch over `loader`.

    Parameters
    ----------
    model: module exposing `run_on_batch(batch) -> (predictions, losses, _)`
    ep: int, epoch number (for progress printing only)
    loader: DataLoader of labelled batches
    optimizer, scheduler: torch optimizer and LR scheduler (stepped per batch)
    clip_gradient_norm: float or falsy; max gradient norm if truthy

    Returns the last batch's (predictions, losses) and the optimizer.
    """
    model.train()
    total_loss = 0
    batch_idx = 0
    batch_size = loader.batch_size
    total_batch = len(loader.dataset)
    for batch in loader:
        predictions, losses, _ = model.run_on_batch(batch)
        loss = sum(losses.values())
        total_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        # Bug fix: clip gradients BEFORE the optimizer step — clipping after
        # optimizer.step() (as before) had no effect on the update at all.
        if clip_gradient_norm:
            clip_grad_norm_(model.parameters(), clip_gradient_norm)
        optimizer.step()
        scheduler.step()
        batch_idx += 1
        print(f'Train Epoch: {ep} [{batch_idx*batch_size}/{total_batch}'
              f'({100. * batch_idx*batch_size / total_batch:.0f}%)]'
              f'\tLoss: {loss.item():.6f}'
              , end='\r')
    print(' '*100, end = '\r')
    print(f'Train Epoch: {ep}\tLoss: {total_loss/len(loader):.6f}')
    return predictions, losses, optimizer
def train_VAT_model(model, iteration, ep, l_loader, ul_loader, optimizer, scheduler, clip_gradient_norm, alpha, VAT=False, VAT_start=0):
    """Run `iteration` semi-supervised training steps.

    Draws labelled batches from `l_loader` (and, once `ep >= VAT_start` with
    `VAT=True`, unlabelled batches from `ul_loader`). LDS (VAT) loss terms are
    scaled by `alpha/2`; all other loss terms are summed as-is.

    Returns the last step's (predictions, losses) and the optimizer.
    """
    model.train()
    batch_size = l_loader.batch_size
    total_loss = 0
    l_loader = cycle(l_loader)  # loaders are shorter than `iteration`; recycle them
    if ul_loader:
        ul_loader = cycle(ul_loader)
    for i in range(iteration):
        optimizer.zero_grad()
        batch_l = next(l_loader)
        if (ep < VAT_start) or (VAT==False):
            predictions, losses, _ = model.run_on_batch(batch_l,None, False)
        else:
            batch_ul = next(ul_loader)
            predictions, losses, _ = model.run_on_batch(batch_l,batch_ul, VAT)
        loss = 0
        for key in losses.keys():
            if key.startswith('loss/train_LDS'):
                loss += alpha*losses[key]/2 # No need to divide by 2 if you have only _l
            else:
                loss += losses[key]
        loss.backward()
        total_loss += loss.item()
        # Bug fix: clip gradients BEFORE the optimizer step — clipping after
        # optimizer.step() (as before) had no effect on the update at all.
        if clip_gradient_norm:
            clip_grad_norm_(model.parameters(), clip_gradient_norm)
        optimizer.step()
        scheduler.step()
        print(f'Train Epoch: {ep} [{i*batch_size}/{iteration*batch_size}'
              f'({100. * i / iteration:.0f}%)]'
              f"\tMain Loss: {sum(losses.values()):.6f}\t"
              , end='\r')
    print(' '*100, end = '\r')
    print(f'Train Epoch: {ep}\tLoss: {total_loss/iteration:.6f}')
    return predictions, losses, optimizer
def train_VAT_model_application(model, iteration, ep, l_loader, ul_loader, optimizer, scheduler, clip_gradient_norm, alpha, VAT=False, VAT_start=0):
    """Same training loop as `train_VAT_model`, but dispatching to the model's
    `run_on_batch_application` entry point (application / inference-style batches).

    Returns the last step's (predictions, losses) and the optimizer.
    """
    model.train()
    batch_size = l_loader.batch_size
    total_loss = 0
    l_loader = cycle(l_loader)  # loaders are shorter than `iteration`; recycle them
    if ul_loader:
        ul_loader = cycle(ul_loader)
    for i in range(iteration):
        optimizer.zero_grad()
        batch_l = next(l_loader)
        if (ep < VAT_start) or (VAT==False):
            predictions, losses, _ = model.run_on_batch_application(batch_l,None, False)
        else:
            batch_ul = next(ul_loader)
            predictions, losses, _ = model.run_on_batch_application(batch_l,batch_ul, VAT)
        loss = 0
        for key in losses.keys():
            if key.startswith('loss/train_LDS'):
                loss += alpha*losses[key]/2 # No need to divide by 2 if you have only _l
            else:
                loss += losses[key]
        loss.backward()
        total_loss += loss.item()
        # Bug fix: clip gradients BEFORE the optimizer step — clipping after
        # optimizer.step() (as before) had no effect on the update at all.
        if clip_gradient_norm:
            clip_grad_norm_(model.parameters(), clip_gradient_norm)
        optimizer.step()
        scheduler.step()
        print(f'Train Epoch: {ep} [{i*batch_size}/{iteration*batch_size}'
              f'({100. * i / iteration:.0f}%)]'
              f"\tMain Loss: {sum(losses.values()):.6f}\t"
              , end='\r')
    print(' '*100, end = '\r')
    print(f'Train Epoch: {ep}\tLoss: {total_loss/iteration:.6f}')
    return predictions, losses, optimizer
def eval_model(model, ep, loader, VAT_start=0, VAT=False):
    """Evaluate `model` over `loader` and collect per-batch loss values.

    Returns a dict mapping each loss key to a list of scalar values
    (one per batch). Prints progress to stdout.
    """
    model.eval()
    batch_size = loader.batch_size
    metrics = defaultdict(list)
    i = 0
    # Bug fix: evaluation previously ran with autograd enabled, building
    # computation graphs that were never used (wasted time and memory).
    with torch.no_grad():
        for batch in loader:
            if ep < VAT_start or VAT==False:
                predictions, losses, _ = model.run_on_batch(batch, None, False)
            else:
                predictions, losses, _ = model.run_on_batch(batch, None, True)
            for key, loss in losses.items():
                metrics[key].append(loss.item())
            print(f'Eval Epoch: {ep} [{i*batch_size}/{len(loader)*batch_size}'
                  f'({100. * i / len(loader):.0f}%)]'
                  f"\tMain Loss: {sum(losses.values()):.6f}"
                  , end='\r')
            i += 1
    print(' '*100, end = '\r')
    return metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class MutliHeadAttention1D(nn.Module):
    """Multi-head local (windowed) self-attention over 1D sequences.

    Input shape (batch, len, in_features); output shape (batch, len,
    out_features) plus the softmax attention weights of shape
    (batch, len, groups, kernel_size).
    NOTE: the last source line carried a dataset-dump artifact
    (`| 3357 | ... | py |`) that made the file unparseable — removed here.
    """
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """kernel_size is the 1D local attention window size"""
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding = (kernel_size-1)//2
        self.groups = groups
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_features % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"
        if self.position:
            # Relative position encoding
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)
        # Input shape = (batch, len, feat)
        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)
        self.reset_parameters()

    def forward(self, x):
        batch, seq_len, feat_dim = x.size()
        padded_x = F.pad(x, [0, 0, self.padding, self.padding])
        q_out = self.W_q(x)
        k_out = self.W_k(padded_x)
        v_out = self.W_v(padded_x)
        k_out = k_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        v_out = v_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        if self.position:
            k_out = k_out + self.rel # relative position?
        k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        # (batch, L, n_heads, feature_per_head, local_window)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
        # (batch, L, n_heads, feature_per_head, 1)
        energy = (q_out * k_out).sum(-2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, L, n_heads, 1, local_window)
        out = attention*v_out
        # out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
        # (batch, c, H, W)
        return out.sum(-1).flatten(2), attention.squeeze(3)

    def reset_parameters(self):
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')
        if self.position:
            init.normal_(self.rel, 0, 1)
import sys
from functools import reduce
import torch
from PIL import Image
from torch.nn.modules.module import _addindent
def cycle(iterable):
    """Yield items from `iterable` forever, restarting it after each pass.

    Unlike `itertools.cycle`, this re-iterates the source each round
    (so it must be re-iterable, e.g. a list or DataLoader).
    """
    while True:
        yield from iterable
def summary(model, file=sys.stdout):
    """Print a recursive summary of a torch module tree with parameter counts.

    Parameters are counted from each module's `_parameters`; submodules are
    walked via `_modules`. If `file` is stdout the count is ANSI-coloured
    green; if `file` is a path string it is opened for writing; if None,
    nothing is printed. Returns the total parameter count.
    """
    # NOTE: the inner helper shadows the builtin `repr` (kept for compatibility).
    def repr(model):
        # We treat the extra repr like the sub-module, one item per line
        extra_lines = []
        extra_repr = model.extra_repr()
        # empty string will be split into list ['']
        if extra_repr:
            extra_lines = extra_repr.split('\n')
        child_lines = []
        total_params = 0
        for key, module in model._modules.items():
            mod_str, num_params = repr(module)
            mod_str = _addindent(mod_str, 2)
            child_lines.append('(' + key + '): ' + mod_str)
            total_params += num_params
        lines = extra_lines + child_lines
        for name, p in model._parameters.items():
            # product of the shape = number of elements in the parameter
            if hasattr(p, 'shape'):
                total_params += reduce(lambda x, y: x * y, p.shape)
        main_str = model._get_name() + '('
        if lines:
            # simple one-liner info, which most builtin Modules will use
            if len(extra_lines) == 1 and not child_lines:
                main_str += extra_lines[0]
            else:
                main_str += '\n ' + '\n '.join(lines) + '\n'
        main_str += ')'
        if file is sys.stdout:
            main_str += ', \033[92m{:,}\033[0m params'.format(total_params) #[92m is green color, [0m is black color
        else:
            main_str += ', {:,} params'.format(total_params)
        return main_str, total_params
    string, count = repr(model)
    if file is not None:
        if isinstance(file, str):
            # treat a string as a path and write the summary there
            file = open(file, 'w')
        print(string, file=file)
        file.flush()
    return count
def save_pianoroll(path, onsets, frames, onset_threshold=0.5, frame_threshold=0.5, zoom=4):
    """
    Saves a piano roll diagram

    Parameters
    ----------
    path: str
    onsets: torch.FloatTensor, shape = [frames, bins]
    frames: torch.FloatTensor, shape = [frames, bins]
    onset_threshold: float
    frame_threshold: float
    zoom: int
    """
    # Threshold and invert (1 = inactive, 0 = active) so active notes render dark.
    onset_mask = (1 - (onsets.t() > onset_threshold).to(torch.uint8)).cpu()
    frame_mask = (1 - (frames.t() > frame_threshold).to(torch.uint8)).cpu()
    # 0 wherever either an onset or a frame is active
    either = 1 - (1 - onset_mask) * (1 - frame_mask)
    # Stack as RGB channels, flip so low pitches sit at the bottom, scale to 0..255.
    rgb = torch.stack([onset_mask, frame_mask, either], dim=2).flip(0).mul(255).numpy()
    image = Image.fromarray(rgb, 'RGB')
    image = image.resize((image.size[0], image.size[1] * zoom))
    image.save(path)
class Normalization():
    """Min-max normalization for spectrogram batches of shape (batch, time, freq).

    Two modes: 'framewise' scales every time frame independently (all-constant
    frames, which would divide by zero, are mapped to 0), while 'imagewise'
    scales each whole spectrogram by its global min/max.
    """
    def __init__(self, mode='framewise'):
        if mode == 'framewise':
            def normalize(x):
                # min/max per time frame, along the frequency axis
                frame_max = x.max(1, keepdim=True)[0]
                frame_min = x.min(1, keepdim=True)[0]
                scaled = (x - frame_min) / (frame_max - frame_min)
                # constant frames produce 0/0 -> NaN; zero them out
                scaled[torch.isnan(scaled)] = 0
                return scaled
        elif mode == 'imagewise':
            def normalize(x):
                b, t, f = x.shape
                flat = x.view(b, t * f)
                # single min/max per spectrogram, broadcast back over (time, freq)
                img_max = flat.max(1, keepdim=True)[0].unsqueeze(1)
                img_min = flat.min(1, keepdim=True)[0].unsqueeze(1)
                return (x - img_min) / (img_max - img_min)
        else:
            print(f'please choose the correct mode')
        self.normalize = normalize

    def transform(self, x):
        return self.normalize(x)
import json
import os
from abc import abstractmethod
from glob import glob
import sys
import pickle
import pandas as pd
import numpy as np
import soundfile
from torch.utils.data import Dataset
from tqdm import tqdm
from .constants import *
from .midi import parse_midi
class PianoRollAudioDataset(Dataset):
    """Base dataset pairing raw audio with piano-roll labels built from TSV files.

    Subclasses implement `available_groups` and `files`; loading caches each
    track as a `.pt` file next to the audio. `__getitem__` optionally crops a
    random `sequence_length`-sample window and derives onset/offset/frame
    targets from the encoded label matrix.
    """
    def __init__(self, path, groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        self.path = path
        self.groups = groups if groups is not None else self.available_groups()
        self.sequence_length = sequence_length  # crop length in audio samples, or None for full tracks
        self.device = device
        self.random = np.random.RandomState(seed)  # seeded so crops are reproducible
        self.refresh = refresh  # True forces re-parsing even when a .pt cache exists
        self.data = []
        print(f"Loading {len(self.groups)} group{'s' if len(self.groups) > 1 else ''} "
              f"of {self.__class__.__name__} at {path}")
        for group in self.groups:
            for input_files in tqdm(self.files(group), desc='Loading group %s' % group): #self.files is defined in MAPS class
                self.data.append(self.load(*input_files)) # self.load is a function defined below. It first loads all data into memory first

    def __getitem__(self, index):
        """Return a dict with audio, label/velocity matrices and derived
        onset/offset/frame targets (optionally randomly cropped)."""
        data = self.data[index]
        result = dict(path=data['path'])
        if self.sequence_length is not None:
            audio_length = len(data['audio'])
            # pick a random crop start, snapped to a HOP_LENGTH boundary
            step_begin = self.random.randint(audio_length - self.sequence_length) // HOP_LENGTH
            # print(f'step_begin = {step_begin}')
            n_steps = self.sequence_length // HOP_LENGTH
            step_end = step_begin + n_steps
            begin = step_begin * HOP_LENGTH
            # print(f'begin = {begin}')
            end = begin + self.sequence_length
            result['audio'] = data['audio'][begin:end].to(self.device)
            result['label'] = data['label'][step_begin:step_end, :].to(self.device)
            result['velocity'] = data['velocity'][step_begin:step_end, :].to(self.device)
            result['start_idx'] = begin
        else:
            result['audio'] = data['audio'].to(self.device)
            result['label'] = data['label'].to(self.device)
            result['velocity'] = data['velocity'].to(self.device).float()
        result['audio'] = result['audio'].float().div_(32768.0) # converting to float by dividing it by 2^15
        # label encoding: 3 = onset, 2 = sustained frame, 1 = offset, 0 = silence
        result['onset'] = (result['label'] == 3).float()
        result['offset'] = (result['label'] == 1).float()
        result['frame'] = (result['label'] > 1).float()
        result['velocity'] = result['velocity'].float().div_(128.0)
        # print(f"result['audio'].shape = {result['audio'].shape}")
        # print(f"result['label'].shape = {result['label'].shape}")
        return result

    def __len__(self):
        return len(self.data)

    @classmethod # This one seems optional?
    @abstractmethod # This is to make sure other subclasses also contain this method
    def available_groups(cls):
        """return the names of all available groups"""
        raise NotImplementedError

    @abstractmethod
    def files(self, group):
        """return the list of input files (audio_filename, tsv_filename) for this group"""
        raise NotImplementedError

    def load(self, audio_path, tsv_path):
        """
        load an audio track and the corresponding labels

        Returns
        -------
        A dictionary containing the following data:
        path: str
            the path to the audio file
        audio: torch.ShortTensor, shape = [num_samples]
            the raw waveform
        label: torch.ByteTensor, shape = [num_steps, midi_bins]
            a matrix that contains the onset/offset/frame labels encoded as:
            3 = onset, 2 = frames after onset, 1 = offset, 0 = all else
        velocity: torch.ByteTensor, shape = [num_steps, midi_bins]
            a matrix that contains MIDI velocity values at the frame locations
        """
        saved_data_path = audio_path.replace('.flac', '.pt').replace('.wav', '.pt')
        if os.path.exists(saved_data_path) and self.refresh==False: # Check if .pt files exist, if so just load the files
            return torch.load(saved_data_path)
        # Otherwise, create the .pt files
        audio, sr = soundfile.read(audio_path, dtype='int16')
        assert sr == SAMPLE_RATE
        audio = torch.ShortTensor(audio) # convert numpy array to pytorch tensor
        audio_length = len(audio)
        n_keys = MAX_MIDI - MIN_MIDI + 1
        n_steps = (audio_length - 1) // HOP_LENGTH + 1 # This will affect the labels time steps
        label = torch.zeros(n_steps, n_keys, dtype=torch.uint8)
        velocity = torch.zeros(n_steps, n_keys, dtype=torch.uint8)
        tsv_path = tsv_path
        midi = np.loadtxt(tsv_path, delimiter='\t', skiprows=1)
        # print(f'audio size = {audio.shape}')
        # print(f'label size = {label.shape}')
        for onset, offset, note, vel in midi:
            left = int(round(onset * SAMPLE_RATE / HOP_LENGTH)) # Convert time to time step
            onset_right = min(n_steps, left + HOPS_IN_ONSET) # Ensure the time step of onset would not exceed the last time step
            frame_right = int(round(offset * SAMPLE_RATE / HOP_LENGTH))
            frame_right = min(n_steps, frame_right) # Ensure the time step of frame would not exceed the last time step
            offset_right = min(n_steps, frame_right + HOPS_IN_OFFSET)
            f = int(note) - MIN_MIDI
            label[left:onset_right, f] = 3
            label[onset_right:frame_right, f] = 2
            label[frame_right:offset_right, f] = 1
            velocity[left:frame_right, f] = vel
        data = dict(path=audio_path, audio=audio, label=label, velocity=velocity)
        torch.save(data, saved_data_path)
        return data
class MAESTRO(PianoRollAudioDataset):
    """MAESTRO piano dataset; groups come either from the v2.0.0 metadata JSON
    splits ('train'/'validation'/'test') or from a year-named subdirectory."""
    def __init__(self, path='../../public_data/MAESTRO/', groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        super().__init__(path, groups if groups is not None else ['train'], sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['train', 'validation', 'test']

    def files(self, group):
        """Return (audio_path, tsv_path) pairs for `group`, generating the TSV
        from the MIDI file on first use."""
        if group not in self.available_groups():
            # year-based grouping
            flacs = sorted(glob(os.path.join(self.path, group, '*.flac')))
            if len(flacs) == 0:
                flacs = sorted(glob(os.path.join(self.path, group, '*.wav')))
            midis = sorted(glob(os.path.join(self.path, group, '*.midi')))
            files = list(zip(flacs, midis))
            if len(files) == 0:
                raise RuntimeError(f'Group {group} is empty')
        else:
            # split-based grouping via the official metadata file
            metadata = json.load(open(os.path.join(self.path, 'maestro-v2.0.0.json')))
            files = sorted([(os.path.join(self.path, row['audio_filename'].replace('.wav', '.flac')),
                             os.path.join(self.path, row['midi_filename'])) for row in metadata if row['split'] == group])
            # fall back to .wav when no .flac version exists on disk
            files = [(audio if os.path.exists(audio) else audio.replace('.flac', '.wav'), midi) for audio, midi in files]
        result = []
        for audio_path, midi_path in files:
            tsv_filename = midi_path.replace('.midi', '.tsv').replace('.mid', '.tsv')
            if not os.path.exists(tsv_filename):
                # cache the parsed MIDI as a TSV next to it
                midi = parse_midi(midi_path)
                np.savetxt(tsv_filename, midi, fmt='%.6f', delimiter='\t', header='onset,offset,note,velocity')
            result.append((audio_path, tsv_filename))
        return result
class MAPS(PianoRollAudioDataset):
    """MAPS piano dataset; groups are the MAPS instrument/condition codes.

    With `overlap=False`, pieces listed in `overlapping.pkl` (shared with the
    test split) are filtered out; `supersmall=True` then keeps a single file.
    """
    def __init__(self, path='./MAPS', groups=None, sequence_length=None, overlap=True,
                 seed=42, refresh=False, device='cpu', supersmall=False):
        self.overlap = overlap
        self.supersmall = supersmall
        super().__init__(path, groups if groups is not None else ['ENSTDkAm', 'ENSTDkCl'], sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['AkPnBcht', 'AkPnBsdf', 'AkPnCGdD', 'AkPnStgb', 'ENSTDkAm', 'ENSTDkCl', 'SptkBGAm', 'SptkBGCl', 'StbgTGd2']

    def files(self, group):
        """Return sorted (flac, tsv) pairs for `group`, optionally de-overlapped."""
        flacs = glob(os.path.join(self.path, 'flac', '*_%s.flac' % group))
        if self.overlap==False:
            # drop any piece whose name appears in the pickled overlap list
            with open('overlapping.pkl', 'rb') as f:
                test_names = pickle.load(f)
            filtered_flacs = []
            for i in flacs:
                if any([substring in i for substring in test_names]):
                    pass
                else:
                    filtered_flacs.append(i)
            flacs = sorted(filtered_flacs)
            if self.supersmall==True:
                # print(sorted(filtered_flacs))
                # NOTE(review): keeps only the 4th filtered file — confirm intentional
                flacs = [sorted(filtered_flacs)[3]]
        # tsvs = [f.replace('/flac/', '/tsv/matched/').replace('.flac', '.tsv') for f in flacs]
        tsvs = [f.replace('/flac/', '/tsvs/').replace('.flac', '.tsv') for f in flacs]
        # print(flacs)
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
        return sorted(zip(flacs, tsvs))
class MusicNet(PianoRollAudioDataset):
    """MusicNet dataset; groups select labelled/unlabelled subsets per
    instrument family (string/violin/wind/flute) via the metadata CSV."""
    def __init__(self, path='./MusicNet', groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        super().__init__(path, groups if groups is not None else ['train'], sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['train', 'test']

    def read_id(self, path, group, mode):
        """Return recording ids whose 'ensemble' column contains `group`."""
        train_meta = pd.read_csv(os.path.join(path,f'{mode}_metadata.csv'))
        return train_meta[train_meta['ensemble'].str.contains(group)]['id'].values

    def appending_flac_tsv(self, id_list, mode):
        """Collect sorted flac/tsv paths for the given recording ids."""
        flacs = []
        tsvs = []
        for i in id_list:
            flacs.extend(glob(os.path.join(self.path, f"{mode}_data", f"{i}.flac")))
            tsvs.extend(glob(os.path.join(self.path, f"tsv_{mode}_labels/{i}.tsv")))
        flacs = sorted(flacs)
        tsvs = sorted(tsvs)
        return flacs, tsvs

    def files(self, group):
        """Return an iterator of (flac, tsv) pairs for the requested subset.

        Convention: `_l` groups take the FIRST recording of each ensemble key
        (labelled set), `_ul` groups take the REST (unlabelled set).
        """
        string_keys = ['Solo Violin', 'Violin and Harpsichord',
                       'Accompanied Violin', 'String Quartet',
                       'String Sextet', 'Viola Quintet',
                       'Solo Cello', 'Accompanied Cello']
        wind_keys = ['Accompanied Clarinet', 'Clarinet Quintet',
                     'Pairs Clarinet-Horn-Bassoon', 'Clarinet-Cello-Piano Trio',
                     'Wind Octet', 'Wind Quintet']
        train_meta = pd.read_csv(os.path.join(self.path,f'train_metadata.csv'))
        if group == 'small test':
            types = ('2303.flac', '2382.flac', '1819.flac')
            flacs = []
            for i in types:
                flacs.extend(glob(os.path.join(self.path, 'test_data', i)))
            flacs = sorted(flacs)
            # NOTE(review): globs ALL test tsv labels while flacs has only 3
            # entries; the zip below pairs by sorted order — verify alignment
            tsvs = sorted(glob(os.path.join(self.path, f'tsv_test_labels/*.tsv')))
        elif group == 'train_string_l':
            types = np.array([0])  # dummy seed element, deleted below
            for key in string_keys:
                l= train_meta[train_meta['ensemble'].str.contains(key)]['id'].values[:1]
                types = np.concatenate((types,l))
            types = np.delete(types, 0)
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_string_ul':
            types = np.array([0])
            for key in string_keys:
                l= train_meta[train_meta['ensemble'].str.contains(key)]['id'].values[1:]
                types = np.concatenate((types,l))
            types = np.delete(types, 0)
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_violin_l':
            type1 = self.read_id(self.path, 'Solo Violin', 'train')
            type2 = self.read_id(self.path, 'Accompanied Violin', 'train')
            types = np.concatenate((type1,type2))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_violin_ul':
            type1 = self.read_id(self.path, 'String Quartet', 'train')
            type2 = self.read_id(self.path, 'String Sextet', 'train')
            types = np.concatenate((type1,type2))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'test_violin':
            types = ('2106', '2191', '2298', '2628')
            flacs, tsvs = self.appending_flac_tsv(types, 'test')
        elif group == 'train_wind_l':
            types = np.array([0])
            for key in wind_keys:
                l= train_meta[train_meta['ensemble'].str.contains(key)]['id'].values[:1]
                types = np.concatenate((types,l))
            types = np.delete(types, 0)
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_wind_ul':
            types = np.array([0])
            for key in wind_keys:
                l= train_meta[train_meta['ensemble'].str.contains(key)]['id'].values[1:]
                types = np.concatenate((types,l))
            types = np.delete(types, 0)
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'test_wind':
            types = ('1819', '2416')
            flacs, tsvs = self.appending_flac_tsv(types, 'test')
        elif group == 'train_flute_l':
            types = ('2203',)
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_flute_ul':
            types = np.array([0])
            for key in wind_keys:
                l= train_meta[train_meta['ensemble'].str.contains(key)]['id'].values[:]
                types = np.concatenate((types,l))
            types = np.delete(types, 0)
            types = np.concatenate((types,('2203',)))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'test_flute':
            types = ('2204',)
            # NOTE(review): uses 'train' mode for a test group — confirm intentional
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        else:
            types = self.read_id(self.path, group, 'train')
            flacs = []
            for i in types:
                flacs.extend(glob(os.path.join(self.path, 'train_data', f"{i}.flac")))
            flacs = sorted(flacs)
            tsvs = sorted(glob(os.path.join(self.path, f'tsv_train_labels/*.tsv')))
        # else:
        #     flacs = sorted(glob(os.path.join(self.path, f'{group}_data/*.flac')))
        #     tsvs = sorted(glob(os.path.join(self.path, f'tsv_{group}_labels/*.tsv')))
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
        return zip(flacs, tsvs)
class Guqin(PianoRollAudioDataset):
    """Guqin recordings dataset; groups map to fixed lists of piece basenames
    under `audio/` (flac) and `tsv_label/` (tsv).

    Refactor: the three `files` branches previously duplicated the same
    glob-and-sort code; it now lives in `_collect`, driven by a group table.
    Behavior (pairings, sort order, test-branch prints, error message) is
    unchanged.
    """
    # group name -> piece basenames
    _GROUP_NAMES = {
        'train_l': ['jiou', 'siang', 'ciou', 'yi', 'yu', 'feng', 'yang'],
        'train_ul': [],
        'test': ['gu', 'guan', 'liang'],
    }

    def __init__(self, path='./Guqin', groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        super().__init__(path, groups if groups is not None else ['train'], sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['train_l', 'train_ul', 'test']

    def read_id(self, path, group, mode):
        """Return recording ids whose 'ensemble' column contains `group`."""
        train_meta = pd.read_csv(os.path.join(path,f'{mode}_metadata.csv'))
        return train_meta[train_meta['ensemble'].str.contains(group)]['id'].values

    def appending_flac_tsv(self, id_list, mode):
        """Collect sorted flac/tsv paths for the given recording ids."""
        flacs = []
        tsvs = []
        for i in id_list:
            flacs.extend(glob(os.path.join(self.path, f"{mode}_data", f"{i}.flac")))
            tsvs.extend(glob(os.path.join(self.path, f"tsv_{mode}_labels/{i}.tsv")))
        flacs = sorted(flacs)
        tsvs = sorted(tsvs)
        return flacs, tsvs

    def _collect(self, names):
        """Glob matching flac/tsv pairs under audio/ and tsv_label/ for `names`."""
        flacs = []
        tsvs = []
        for name in names:
            flacs.extend(glob(os.path.join(self.path, 'audio', name + '.flac')))
            tsvs.extend(glob(os.path.join(self.path, 'tsv_label', name + '.tsv')))
        return sorted(flacs), sorted(tsvs)

    def files(self, group):
        """Return an iterator of (flac, tsv) pairs for `group`."""
        if group not in self._GROUP_NAMES:
            raise Exception("Please choose a valid group")
        flacs, tsvs = self._collect(self._GROUP_NAMES[group])
        if group == 'test':
            print(f'flacs = {flacs}')
            print(f'tsvs = {tsvs}')
        return zip(flacs, tsvs)
class Corelli(PianoRollAudioDataset):
    """Corelli op.6 string recordings used as an application/evaluation set."""

    def __init__(self, path='./Application_String', groups=None, sequence_length=None, overlap=True,
                 seed=42, refresh=False, device='cpu', supersmall=False):
        # overlap=False removes recordings listed in overlapping.pkl;
        # supersmall further shrinks that filtered set to a single file.
        self.overlap = overlap
        self.supersmall = supersmall
        super().__init__(path, groups, sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['op6_no1', 'op6_no2', 'op6_no3']

    def files(self, group):
        """Return sorted (flac, tsv) pairs for the given movement group."""
        flacs = glob(os.path.join(self.path, group, '*.flac'))
        if self.overlap == False:
            # Drop any recording whose name appears in the pickled overlap list.
            with open('overlapping.pkl', 'rb') as f:
                test_names = pickle.load(f)
            filtered_flacs = [candidate for candidate in flacs
                              if not any(name in candidate for name in test_names)]
            flacs = sorted(filtered_flacs)
            if self.supersmall == True:
                # Keep exactly one file: index 3 of the sorted, filtered list.
                flacs = [sorted(filtered_flacs)[3]]
        # Label paths mirror the audio paths under /tsvs/ with a .tsv suffix.
        tsvs = [f.replace('/flac/', '/tsvs/').replace('.flac', '.tsv') for f in flacs]
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
        return sorted(zip(flacs, tsvs))
class Application_Dataset(Dataset):
    """Inference-only dataset: loads raw audio (flac/wav) with no labels.

    Each item is a dict with 'path' and a float 'audio' tensor in [-1, 1].
    """
    def __init__(self, path, seed=42, device='cpu'):
        # path: folder containing .flac / .wav files.
        # NOTE(review): `seed` is accepted for interface parity with the other
        # datasets but is unused here.
        self.path = path
        self.device = device
        self.data = []
        # All audio is loaded into memory up front.
        for input_files in tqdm(self.files(path), desc='Loading files'): #self.files is defined in MAPS class
            self.data.append(self.load(input_files)) # self.load is a function defined below. It first loads all data into memory first
    def __getitem__(self, index):
        data = self.data[index]
        result = dict(path=data['path'])
        audio_length = len(data['audio'])  # NOTE(review): computed but never used
        result['audio'] = data['audio'].to(self.device)
        result['audio'] = result['audio'].float().div_(32768.0) # converting to float by dividing it by 2^15
        return result
    def __len__(self):
        return len(self.data)
    # NOTE(review): decorated @abstractmethod but fully implemented and this
    # class is used directly; the decorator appears to have no effect here
    # (no ABC metaclass) — confirm and consider removing.
    @abstractmethod
    def files(self, group):
        # Only need to load flac files
        flacs = glob(os.path.join(self.path, '*.flac'))
        flacs.extend(glob(os.path.join(self.path, '*.wav'))) # If there are wav files, also load them
        assert(all(os.path.isfile(flac) for flac in flacs))
        return flacs
    def load(self, audio_path):
        """
        load an audio track (no labels)

        Returns
        -------
            A dictionary containing the following data:

            path: str
                the path to the audio file

            audio: torch.ShortTensor, shape = [num_samples]
                the raw int16 waveform
        """
        saved_data_path = audio_path.replace('.flac', '.pt').replace('.wav', '.pt')
        # Otherwise, create the .pt files
        audio, sr = soundfile.read(audio_path, dtype='int16')
        #
        assert sr == SAMPLE_RATE, f'Please make sure the sampling rate is 16k.\n{saved_data_path} has a sampling of {sr}'
        audio = torch.ShortTensor(audio) # convert numpy array to pytorch tensor
        audio_length = len(audio)  # NOTE(review): computed but never used
        data = dict(path=audio_path, audio=audio)
        return data
class Application_Wind(PianoRollAudioDataset):
    """Wind-instrument application set; audio sits directly under *path*."""

    def __init__(self, path='./Application_Wind', groups=None, sequence_length=None, overlap=True,
                 seed=42, refresh=False, device='cpu', supersmall=False):
        # overlap=False removes recordings listed in overlapping.pkl;
        # supersmall further shrinks that filtered set to a single file.
        self.overlap = overlap
        self.supersmall = supersmall
        super().__init__(path, groups, sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['dummy']

    def files(self, group):
        """Collect sorted (flac, tsv) pairs; *group* is ignored here."""
        flacs = glob(os.path.join(self.path, '*.flac'))
        if self.overlap == False:
            # Drop any recording whose name appears in the pickled overlap list.
            with open('overlapping.pkl', 'rb') as f:
                test_names = pickle.load(f)
            filtered_flacs = [candidate for candidate in flacs
                              if not any(name in candidate for name in test_names)]
            flacs = sorted(filtered_flacs)
            if self.supersmall == True:
                # Keep exactly one file: index 3 of the sorted, filtered list.
                flacs = [sorted(filtered_flacs)[3]]
        # Label paths mirror the audio paths under /tsvs/ with a .tsv suffix.
        tsvs = [f.replace('/flac/', '/tsvs/').replace('.flac', '.tsv') for f in flacs]
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
return sorted(zip(flacs, tsvs)) | 22,968 | 40.914234 | 140 | py |
ReconVAT | ReconVAT-master/model/self_attenttion_model.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class MutliHeadAttention1D(nn.Module):
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """kernel_size is the 1D local attention window size"""
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position
        # Symmetric padding so that every time step gets a full window.
        self.padding = (kernel_size-1)//2
        self.groups = groups

        # The head count must divide the feature dimension evenly.
        assert self.out_features % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"

        if self.position:
            # Learned relative position encoding, one offset per window slot.
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)

        # Linear projections for key/query/value over the feature dim only.
        # NOTE: creation order (rel, W_k, W_q, W_v) is kept for reproducible
        # seeded initialization.
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)

        self.reset_parameters()

    def forward(self, x):
        batch, seq_len, feat_dim = x.size()
        x_padded = F.pad(x, [0, 0, self.padding, self.padding])

        # Queries come from the unpadded input; keys/values from the padded one.
        q = self.W_q(x)
        k = self.W_k(x_padded).unfold(1, self.kernel_size, self.stride)   # (batch, L, feature, window)
        v = self.W_v(x_padded).unfold(1, self.kernel_size, self.stride)   # (batch, L, feature, window)

        if self.position:
            k = k + self.rel  # add relative position encoding to the keys

        heads = self.groups
        per_head = self.out_features // heads
        k = k.contiguous().view(batch, seq_len, heads, per_head, -1)      # (batch, L, heads, feat/head, window)
        v = v.contiguous().view(batch, seq_len, heads, per_head, -1)
        q = q.view(batch, seq_len, heads, per_head, 1)                    # trailing 1 broadcasts over the window

        # Dot-product attention over the local window.
        scores = (q * k).sum(-2, keepdim=True)
        weights = F.softmax(scores, dim=-1)                               # (batch, L, heads, 1, window)
        mixed = weights * v

        # Collapse window, then merge heads back into one feature axis.
        return mixed.sum(-1).flatten(2), weights.squeeze(3)

    def reset_parameters(self):
        # Same order as module creation above, for reproducible seeding.
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')
        if self.position:
            init.normal_(self.rel, 0, 1)
class MutliHeadAttention2D(nn.Module):
    """Local 2D multi-head self-attention over (time, frequency) patches.

    forward() takes (batch, in_channels, H, W) and returns the attended
    feature map (batch, out_channels, H, W) plus the attention weights.
    """
    def __init__(self, in_channels, out_channels, kernel_size=(3,3), stride=(1,1), groups=1, bias=False):
        """kernel_size is the 2D local attention window size (time, freq)."""
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # Symmetric padding of (kernel_size-1)/2 per axis keeps output H/W equal to input.
        self.padding_time = (kernel_size[0]-1)//2
        self.padding_freq = (kernel_size[1]-1)//2
        self.groups = groups
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_channels % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        # Relative position encoding: half the channels encode the time offset,
        # the other half the frequency offset (see the split/cat in forward).
        self.rel_t = nn.Parameter(torch.randn(out_channels // 2, 1, 1, kernel_size[0], 1), requires_grad=True)
        self.rel_f = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1, kernel_size[1]), requires_grad=True)
        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.reset_parameters()
    def forward(self, x):
        batch, channels, height, width = x.size()
        # Zero-pad both axes so every (t, f) position has a full local window.
        padded_x = F.pad(x, [self.padding_freq, self.padding_freq, self.padding_time, self.padding_time])
        q_out = self.query_conv(x)
        k_out = self.key_conv(padded_x)
        v_out = self.value_conv(padded_x)
        # unfold extracts a sliding local window along each spatial axis.
        k_out = k_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)
        v_out = v_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)
        # Add the time-offset encoding to the first half of the channels and
        # the frequency-offset encoding to the second half.
        k_out_t, k_out_f = k_out.split(self.out_channels // 2, dim=1)
        k_out = torch.cat((k_out_t + self.rel_t, k_out_f + self.rel_f), dim=1) # relative position?
        k_out = k_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        # (batch, n_heads, feature_per_head, H, W, local H X W)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, self.groups, self.out_channels // self.groups, height, width, 1)
        # (batch, n_heads, feature_per_head, H, W, 1)
        # Alternative way to express dot product
        # same as k_out = k_out.permute(0,1,3,4,2,5)
        # and then energy = torch.matmul(q_out,k_out)
        energy = (q_out * k_out).sum(dim=2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, n_heads, 1, H, W, local HXW)
        out = attention*v_out
        # (batch, n_heads, feature_per_head, H, W, local HXW)
        # Sum over the window, then merge heads back: (batch, c, H, W)
        return out.sum(-1).flatten(1,2), attention.squeeze(2)
    def reset_parameters(self):
        # Kaiming init for the 1x1 projections; unit-normal for position encodings.
        init.kaiming_normal_(self.key_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.value_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.query_conv.weight, mode='fan_out', nonlinearity='relu')
        init.normal_(self.rel_t, 0, 1)
        init.normal_(self.rel_f, 0, 1)
class ConvStack(nn.Module):
    """Convolutional acoustic front end.

    Maps (batch, frames, input_features) to (batch, frames, output_features):
    three conv layers shrink the frequency axis by 4x, then a linear layer
    projects (channels * bins) to the requested feature size.
    """

    def __init__(self, input_features, output_features):
        super().__init__()
        # Module order below defines the architecture; keep it as-is.
        layers = [
            # layer 0
            nn.Conv2d(1, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            # layer 1
            nn.Conv2d(output_features // 16, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            # layer 2: halve the frequency axis, then widen the channels
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
            nn.Conv2d(output_features // 16, output_features // 8, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 8),
            nn.ReLU(),
            # layer 3: halve the frequency axis again
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
        ]
        self.cnn = nn.Sequential(*layers)
        self.fc = nn.Sequential(
            nn.Linear((output_features // 8) * (input_features // 4), output_features),
            nn.Dropout(0.5)
        )

    def forward(self, spec):
        # (batch, frames, bins) -> (batch, 1 channel, frames, bins)
        h = spec.unsqueeze(1)
        h = self.cnn(h)
        # Fold channels back into the feature axis: (batch, frames, channels * bins)
        h = h.transpose(1, 2).flatten(-2)
        return self.fc(h)
class Onset_Stack(nn.Module):
    """Onset head: conv front end -> sequence model -> sigmoid linear projection.

    forward() returns (onset_probabilities, attention_weights).
    """

    def __init__(self, input_features, model_size, output_features, sequence_model):
        super().__init__()
        self.convstack = ConvStack(input_features, model_size)
        self.sequence_model = sequence_model
        self.linear = nn.Linear(model_size, output_features)

    def forward(self, x):
        features = self.convstack(x)
        features, attention = self.sequence_model(features)
        logits = self.linear(features)
        return torch.sigmoid(logits), attention
class Combine_Stack_with_attn(nn.Module):
    """Final head: a sequence model followed by a sigmoid-activated linear layer.

    `attention_mode` is accepted for interface compatibility but not stored;
    `w_size` is stored but does not affect the computation here.
    forward() returns (frame_probabilities, attention_weights).
    """

    def __init__(self, model_size, output_features, sequence_model, attention_mode, w_size):
        super().__init__()
        self.sequence_model = sequence_model
        self.w_size = w_size
        self.linear = nn.Linear(model_size, output_features)

    def forward(self, x):
        hidden, attention = self.sequence_model(x)
        return torch.sigmoid(self.linear(hidden)), attention
class OnsetsAndFrames_self_attention(nn.Module):
    """Onsets-and-frames style transcriber whose sequence layers are local
    multi-head self-attention (MutliHeadAttention1D) instead of LSTMs.

    forward() consumes a normalized log-Mel spectrogram (batch, frames, bins)
    and returns (onset_pred, activation_pred, frame_pred, combined_attention).
    """

    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', device='cpu', attention_mode='activation', w_size=30, n_heads=8, onset_stack=True, LSTM=True):
        super().__init__()
        # BUG FIX: the boolean flag was previously stored in self.onset_stack
        # and then silently overwritten by the Onset_Stack module assigned
        # below, so `if self.onset_stack:` in run_on_batch was always truthy
        # and onset_stack=False never took effect. The flag now has its own
        # name; self.onset_stack keeps holding the module, as before.
        self.use_onset_stack = onset_stack
        self.w_size = w_size
        self.device = device
        self.log = log
        self.normalize = Normalization(mode)
        self.attention_mode = attention_mode

        # Fixed (non-trainable) Mel front end; log + normalization are applied
        # in run_on_batch / feed_audio.
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)

        model_size = model_complexity * 16
        # Factory for the local self-attention sequence model.
        sequence_model = lambda input_size, output_size: MutliHeadAttention1D(in_features=input_size,
                                                                              out_features=output_size,
                                                                              kernel_size=w_size,
                                                                              groups=n_heads)

        self.combined_stack = Combine_Stack_with_attn(model_size, output_features,
                                                      sequence_model(output_features * 2,
                                                                     model_size),
                                                      attention_mode,
                                                      w_size)

        self.onset_stack = Onset_Stack(input_features, model_size, output_features,
                                       sequence_model(model_size, model_size))
        self.frame_stack = nn.Sequential(
            ConvStack(input_features, model_size),
            nn.Linear(model_size, output_features),
            nn.Sigmoid()
        )

    def forward(self, spec):
        # Onset head (its own attention map is not returned)
        onset_pred, onset_attention = self.onset_stack(spec)
        # Frame activations straight from the conv stack
        activation_pred = self.frame_stack(spec)
        # The combined head attends over onsets + activations; onsets are
        # detached so the frame loss does not backprop into the onset head.
        combined_pred = torch.cat([onset_pred.detach(), activation_pred], dim=-1)
        frame_pred, combined_attention = self.combined_stack(combined_pred)
        return onset_pred, activation_pred, frame_pred, combined_attention

    def run_on_batch(self, batch):
        """Compute predictions and BCE losses for a training/validation batch."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']

        # Trim the last audio sample before the transform (as in the other models here)
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)  # (batch, frames, n_mels)

        onset_pred, activation_pred, frame_pred, a = self(spec)
        if self.use_onset_stack:
            predictions = {
                'onset': onset_pred.reshape(*onset_label.shape),
                'activation': activation_pred,
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a
            }
            losses = {
                'loss/onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        else:
            # Without an onset head the frame predictions double as onsets.
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'activation': activation_pred,
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a
            }
            losses = {
                'loss/frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }

        return predictions, losses, spec

    def feed_audio(self, audio):
        """Run inference on raw audio (no labels, no losses)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)

        onset_pred, activation_pred, frame_pred, a = self(spec)
        predictions = {
            'onset': onset_pred,
            'activation': activation_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Load only the weights whose names exist in this model (partial load)."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class simple_onset_frame(nn.Module):
    """Minimal two-head transcriber: one self-attention block predicts onsets,
    a second (fed with the onset predictions) predicts frames.

    forward() returns (frame_pred, onset_pred, onset_attention).
    """

    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.normalize = Normalization(mode)
        # Fixed (non-trainable) Mel front end
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)

        # Onset head
        self.sequence_model_onset = MutliHeadAttention1D(in_features=input_features,
                                                         out_features=model_complexity,
                                                         kernel_size=w_size,
                                                         position=position,
                                                         groups=n_heads)
        self.layer_norm_onset = nn.LayerNorm(model_complexity)
        self.linear_onset = nn.Linear(model_complexity, output_features)

        # Frame head: consumes the onset predictions concatenated with the
        # onset head's hidden state.
        self.sequence_model_frame = MutliHeadAttention1D(in_features=model_complexity+output_features,
                                                         out_features=model_complexity,
                                                         kernel_size=w_size,
                                                         position=position,
                                                         groups=n_heads)
        self.layer_norm_frame = nn.LayerNorm(model_complexity)
        self.linear_frame = nn.Linear(model_complexity, output_features)

    def forward(self, spec):
        x, a = self.sequence_model_onset(spec)
        x = self.layer_norm_onset(x)
        onset_pred = torch.sigmoid(self.linear_onset(x))

        # Concatenate onset predictions with the onset hidden state
        x = torch.cat((onset_pred, x), -1)
        x, _ = self.sequence_model_frame(x)
        x = self.layer_norm_frame(x)
        frame_pred = torch.sigmoid(self.linear_frame(x))

        return frame_pred, onset_pred, a

    def run_on_batch(self, batch):
        """Compute predictions and BCE losses for a batch."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']

        # Trim the last audio sample before the transform (as in the other models here)
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)  # (batch, frames, n_mels)

        frame_pred, onset_pred, a = self(spec)

        predictions = {
            'onset': onset_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
            'attention': a
        }

        # The frame loss key is split into train/test so both appear in the logs.
        if self.training:
            losses = {
                'loss/onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        else:
            losses = {
                'loss/onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }

        return predictions, losses, spec

    def feed_audio(self, audio):
        """Run inference on raw audio (no labels, no losses)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)

        # BUG FIX: forward() returns three values (frame_pred, onset_pred,
        # attention); the previous code unpacked four and raised a ValueError.
        frame_pred, onset_pred, a = self(spec)
        predictions = {
            'onset': onset_pred,
            # no separate activation head in this model; expose the frame
            # predictions under 'activation' for interface parity
            'activation': frame_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Load only the weights whose names exist in this model (partial load)."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class standalone_self_attention_1D(nn.Module):
    """Single 1D self-attention block + sigmoid linear classifier.

    forward() returns (frame_pred, attention). There is no dedicated onset
    head; the frame output is also reported under the 'onset' key.
    """

    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, layernorm_pos=None):
        super().__init__()
        self.w_size = w_size
        self.log = log
        # layernorm_pos: None (no LayerNorm), 'Before' (before the linear
        # layer) or 'After' (after the linear layer, before the sigmoid).
        self.layernorm_pos = layernorm_pos
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)

        self.sequence_model = MutliHeadAttention1D(in_features=input_features,
                                                   out_features=model_complexity,
                                                   kernel_size=w_size,
                                                   position=position,
                                                   groups=n_heads)
        if layernorm_pos == 'Before':
            self.layer_norm = nn.LayerNorm(model_complexity)
        elif layernorm_pos == 'After':
            self.layer_norm = nn.LayerNorm(output_features)
        self.linear = nn.Linear(model_complexity, output_features)

    def forward(self, spec):
        x, a = self.sequence_model(spec)
        if self.layernorm_pos == 'Before':
            x = self.layer_norm(x)
        x = self.linear(x)
        if self.layernorm_pos == 'After':
            x = self.layer_norm(x)
        frame_pred = torch.sigmoid(x)
        return frame_pred, a

    def run_on_batch(self, batch):
        """Compute predictions and the frame BCE loss for a batch."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']

        # Trim the last audio sample before the transform (as in the other models here)
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)  # (batch, frames, n_mels)

        frame_pred, a = self(spec)

        predictions = {
            'onset': frame_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
            'attention': a
        }

        # The frame loss key is split into train/test so both appear in the logs.
        if self.training:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        else:
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }

        return predictions, losses, spec

    def feed_audio(self, audio):
        """Run inference on raw audio (no labels, no losses)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)

        # BUG FIX: forward() returns two values (frame_pred, attention); the
        # previous code unpacked four and raised a ValueError at runtime.
        frame_pred, a = self(spec)
        predictions = {
            'onset': frame_pred,       # no dedicated onset head (matches run_on_batch)
            'activation': frame_pred,  # kept for interface parity with other models
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Load only the weights whose names exist in this model (partial load)."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class standalone_self_attention_2D(nn.Module):
    """Single 2D local self-attention block + sigmoid linear classifier.

    The spectrogram is treated as a 1-channel image; the attention output is
    flattened across (channel, frequency) before the linear projection.
    forward() returns (frame_pred, attention).
    """

    def __init__(self, input_features, output_features, model_complexity=16, w_size=(3,3),
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)

        # input_features here is the channel count fed to the 2D attention
        # (1 for a spectrogram image).
        self.sequence_model = MutliHeadAttention2D(in_channels=input_features,
                                                   out_channels=model_complexity,
                                                   kernel_size=w_size,
                                                   stride=(1,1),
                                                   groups=1, bias=False)
        self.linear = nn.Linear(N_BINS*model_complexity, output_features)

    def forward(self, spec):
        spec = spec.unsqueeze(1)  # (batch, 1, frames, bins)
        x, a = self.sequence_model(spec)
        x = x.transpose(1, 2).flatten(2)  # (batch, frames, channels*bins)
        frame_pred = torch.sigmoid(self.linear(x))
        return frame_pred, a

    def run_on_batch(self, batch):
        """Compute predictions and the frame BCE loss for a batch."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']

        # Trim the last audio sample before the transform (as in the other models here)
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)  # (batch, frames, n_mels)

        frame_pred, a = self(spec)

        # The attention map is not exposed here (matching the original code).
        predictions = {
            'onset': frame_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
        }

        # The frame loss key is split into train/test so both appear in the logs.
        if self.training:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        else:
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }

        return predictions, losses, spec

    def feed_audio(self, audio):
        """Run inference on raw audio (no labels, no losses)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)

        # BUG FIX: forward() returns two values (frame_pred, attention); the
        # previous code unpacked four and raised a ValueError at runtime.
        frame_pred, a = self(spec)
        predictions = {
            'onset': frame_pred,       # no dedicated onset head (matches run_on_batch)
            'activation': frame_pred,  # kept for interface parity with other models
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Load only the weights whose names exist in this model (partial load)."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
own_state[name].copy_(param) | 29,286 | 40.958453 | 211 | py |
ReconVAT | ReconVAT-master/model/__init__.py | from .constants import *
from .dataset import MAPS, MAESTRO, MusicNet, Corelli, Application_Wind, Application_Dataset
from .decoding import *
from .midi import save_midi
from .utils import *
from .evaluate_functions import *
from .helper_functions import *
# from .Conv_Seq2Seq import *
from .self_attenttion_model import *
from .VAT import *
from .onset_frame_VAT import *
from .UNet_onset import *
from .self_attention_VAT import *
from .Segmentation import *
from .Thickstun_model import *
from .Unet_prestack import * | 521 | 31.625 | 92 | py |
ReconVAT | ReconVAT-master/model/decoding.py | import numpy as np
import torch
def extract_notes_wo_velocity(onsets, frames, onset_threshold=0.5, frame_threshold=0.5, rule='rule1'):
    """
    Finds the note timings based on the onsets and frames information

    Parameters
    ----------
    onsets: torch.FloatTensor, shape = [frames, bins]
    frames: torch.FloatTensor, shape = [frames, bins]
    onset_threshold: float
    frame_threshold: float
    rule: str
        'rule1' additionally requires an active frame at the onset position;
        'rule2' keeps every rising onset edge.

    Returns
    -------
    pitches: np.ndarray of bin_indices
    intervals: np.ndarray of rows containing (onset_index, offset_index)
    """
    onsets = (onsets > onset_threshold).cpu().to(torch.uint8)
    frames = (frames > frame_threshold).cpu().to(torch.uint8)

    # Keep only rising edges so each onset activation is a single time-step.
    rising = torch.cat([onsets[:1, :], onsets[1:, :] - onsets[:-1, :]], dim=0) == 1

    if rule == 'rule2':
        pass
    elif rule == 'rule1':
        # Used in simple models: the frame must also be active at the onset.
        rising = rising & (frames == 1)
    else:
        raise NameError('Please enter the correct rule name')

    pitches = []
    intervals = []
    n_steps = onsets.shape[0]

    for step, pitch_bin in torch.nonzero(rising, as_tuple=False):
        step = step.item()
        pitch_bin = pitch_bin.item()
        end = step
        # Advance until both onset and frame activations die out (or the clip ends).
        while onsets[end, pitch_bin].item() or frames[end, pitch_bin].item():
            end += 1
            if end == n_steps:
                break
        if end > step:
            pitches.append(pitch_bin)
            intervals.append([step, end])

    return np.array(pitches), np.array(intervals)
def extract_notes(onsets, frames, velocity, onset_threshold=0.5, frame_threshold=0.5):
    """
    Finds the note timings based on the onsets and frames information

    Parameters
    ----------
    onsets: torch.FloatTensor, shape = [frames, bins]
    frames: torch.FloatTensor, shape = [frames, bins]
    velocity: torch.FloatTensor, shape = [frames, bins]
    onset_threshold: float
    frame_threshold: float

    Returns
    -------
    pitches: np.ndarray of bin_indices
    intervals: np.ndarray of rows containing (onset_index, offset_index)
    velocities: np.ndarray of velocity values (mean over the onset span)
    """
    onsets = (onsets > onset_threshold).cpu().to(torch.uint8)
    frames = (frames > frame_threshold).cpu().to(torch.uint8)
    # Keep only rising edges so each onset activation is a single time-step
    onset_diff = torch.cat([onsets[:1, :], onsets[1:, :] - onsets[:-1, :]], dim=0) == 1

    pitches = []
    intervals = []
    velocities = []

    # Use torch.nonzero(..., as_tuple=False) for consistency with
    # extract_notes_wo_velocity and to avoid the deprecated Tensor.nonzero() call.
    for nonzero in torch.nonzero(onset_diff, as_tuple=False):
        frame = nonzero[0].item()
        pitch = nonzero[1].item()

        onset = frame
        offset = frame
        velocity_samples = []

        # This while loop is looking for where the note ends
        while onsets[offset, pitch].item() or frames[offset, pitch].item():
            if onsets[offset, pitch].item():
                # Sample velocity only while the onset activation is still on
                velocity_samples.append(velocity[offset, pitch].item())
            offset += 1
            if offset == onsets.shape[0]:
                break

        # After knowing where the note starts and ends, record pitch, interval and mean velocity
        if offset > onset:
            pitches.append(pitch)
            intervals.append([onset, offset])
            velocities.append(np.mean(velocity_samples) if len(velocity_samples) > 0 else 0)

    return np.array(pitches), np.array(intervals), np.array(velocities)
def notes_to_frames(pitches, intervals, shape):
    """
    Takes lists specifying notes sequences and return

    Parameters
    ----------
    pitches: list of pitch bin indices
    intervals: list of [onset, offset] ranges of bin indices
    shape: the shape of the original piano roll, [n_frames, n_bins]

    Returns
    -------
    time: np.ndarray containing the frame indices
    freqs: list of np.ndarray, each containing the frequency bin indices
    """
    piano_roll = np.zeros(tuple(shape))
    # Paint each note onto the roll: frames [onset, offset) at the note's pitch bin.
    for pitch_bin, (start, stop) in zip(pitches, intervals):
        piano_roll[start:stop, pitch_bin] = 1

    time = np.arange(piano_roll.shape[0])
    freqs = [np.flatnonzero(piano_roll[step, :]) for step in time]
    return time, freqs
| 4,479 | 33.198473 | 134 | py |
ReconVAT | ReconVAT-master/model/UNet_onset.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
from itertools import cycle
def create_triangular_cycle(start, end, period):
    """Return an infinite iterator tracing a triangular wave.

    The wave ramps from `start` to `end` over `period` points, then back
    down, repeating forever (useful e.g. for cyclical schedules).
    """
    ascending = torch.linspace(start, end, period)
    # Drop both endpoints of the descending ramp so each turning point
    # appears exactly once per full cycle.
    descending = torch.linspace(end, start, period)[1:-1]
    return cycle(torch.cat((ascending, descending)))
class MutliHeadAttention1D(nn.Module):
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """Multi-head local (windowed) self-attention over a 1D sequence.

        ``kernel_size`` is the 1D local attention window size; ``groups``
        acts as the number of attention heads, so ``out_features`` must be
        divisible by it. When ``position`` is True, a learned relative
        position embedding is added to the keys.
        """
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position
        # 'same'-style padding for an odd-sized window: (kernel_size-1)/2 on each side
        self.padding = (kernel_size-1)//2
        self.groups = groups
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_features % self.groups == 0, f"out_channels should be divided by groups. (example: out_channels: 40, groups: 4). Now out_channels={self.out_features}, groups={self.groups}"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"
        if self.position:
            # Relative position encoding: one learned vector per window offset
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)
        # Input shape = (batch, len, feat)
        # Linear projections expand only the feature dim (in_features ->
        # out_features) without affecting the batch/time dimensions.
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)
        self.reset_parameters()

    def forward(self, x):
        """Return ``(output, attention)``.

        output: (batch, seq_len, out_features) attended features.
        attention: per-head softmax weights over each local window.
        """
        batch, seq_len, feat_dim = x.size()
        # Pad the sequence ends so every time step owns a full window
        padded_x = F.pad(x, [0, 0, self.padding, self.padding])
        q_out = self.W_q(x)
        k_out = self.W_k(padded_x)
        v_out = self.W_v(padded_x)
        # unfold extracts a sliding local window for every time step
        k_out = k_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        v_out = v_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        if self.position:
            k_out = k_out + self.rel  # add learned relative position embedding to keys
        k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        # (batch, L, n_heads, feature_per_head, local_window)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
        # (batch, L, n_heads, feature_per_head, 1)
        # Dot product over the per-head feature dim gives window-wise energies
        energy = (q_out * k_out).sum(-2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, L, n_heads, 1, local_window)
        out = attention*v_out
        # out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
        # Sum over the window, then merge heads back into one feature dim
        return out.sum(-1).flatten(2), attention.squeeze(3)

    def reset_parameters(self):
        """Kaiming-initialize the projections; N(0,1) for the position embedding."""
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')
        if self.position:
            init.normal_(self.rel, 0, 1)
class UNet_VAT(nn.Module):
    """Virtual Adversarial Training (VAT) regularization for the U-net model.

    Finds a small perturbation ``r_adv`` (via power iteration on a random
    direction ``d``) that maximally changes the transcriber's predictions,
    then returns the local distributional smoothness losses measured at
    ``x + r_adv`` against the unperturbed predictions.
    """
    def __init__(self, XI, epsilon, n_power, KL_Div, reconstruction=False):
        super().__init__()
        self.n_power = n_power    # power-iteration steps for estimating r_adv
        self.XI = XI              # step size while searching the adversarial direction
        self.epsilon = epsilon    # magnitude of the final perturbation
        self.KL_Div = KL_Div      # if True, use KL divergence; else binary cross-entropy
        self.binwise = False
        self.reconstruction = reconstruction

    def forward(self, model, x):
        """Return ``({'frame': loss, 'onset': loss}, r_adv, normalized d)``."""
        with torch.no_grad():
            # Reference predictions act as fixed targets, hence no grad needed
            frame_ref, onset_ref, _ = model.transcriber(x)

        # --- generate the virtual adversarial perturbation ---
        d = torch.randn_like(x, requires_grad=True)  # random start direction; needs grad
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            x_adv = (x + r).clamp(0, 1)
            frame_pred, onset_pred, _ = model.transcriber(x_adv)
            if self.KL_Div == True:
                # BUGFIX: previously referenced undefined y_pred/y_ref
                # (NameError at runtime) and ignored the onset head.
                loss = binary_kl_div(frame_pred, frame_ref) + binary_kl_div(onset_pred, onset_ref)
            else:
                frame_loss = F.binary_cross_entropy(frame_pred, frame_ref)
                onset_loss = F.binary_cross_entropy(onset_pred, onset_ref)
                loss = (frame_loss + onset_loss)
            loss.backward()  # calculate gradient wrt d
            d = d.grad.detach() * 1e10  # rescale to avoid underflow before normalization
            model.zero_grad()  # prevent gradient change in the model

        # --- generate virtual labels and calculate the VAT loss ---
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any() == False, f"r_adv has nan, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        # BUGFIX: this assert previously repeated torch.isnan; it is meant to catch inf.
        assert torch.isinf(r_adv).any() == False, f"r_adv has inf, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        x_adv = (x + r_adv).clamp(0, 1)
        frame_pred, onset_pred, _ = model.transcriber(x_adv)
        if self.KL_Div == True:
            # BUGFIX: previously used undefined y_pred/y_ref and returned a
            # scalar where callers expect the {'frame', 'onset'} dict below.
            vat_loss = {'frame': binary_kl_div(frame_pred, frame_ref),
                        'onset': binary_kl_div(onset_pred, onset_ref)}
        else:
            vat_frame_loss = F.binary_cross_entropy(frame_pred, frame_ref)
            vat_onset_loss = F.binary_cross_entropy(onset_pred, onset_ref)
            vat_loss = {'frame': vat_frame_loss,
                        'onset': vat_onset_loss}
        return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise)  # already averaged
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
def binary_kl_div(y_pred, y_ref):
    """KL divergence between two fields of Bernoulli probabilities.

    Each probability is expanded into an explicit two-class distribution
    (p, 1-p) before calling F.kl_div with 'batchmean' reduction.
    """
    # Keep probabilities strictly inside (0, 1) so log() stays finite.
    pred = torch.clamp(y_pred, 1e-4, 0.9999)
    ref = torch.clamp(y_ref, 1e-4, 0.9999)
    q = torch.stack((pred, 1 - pred), -1)
    p = torch.stack((ref, 1 - ref), -1)
    log_p = p.log()
    assert not torch.isnan(log_p).any(), "r_adv exploded, please debug tune down the XI for VAT"
    assert not torch.isinf(log_p).any(), "r_adv vanished, please debug tune up the XI for VAT"
    return F.kl_div(log_p, q, reduction='batchmean')
batchNorm_momentum = 0.1
class block(nn.Module):
    """Residual encoder block.

    Two same-padded convolutions with a 1x1 skip projection, followed by a
    strided downsampling convolution. Returns the downsampled map (twice,
    matching the original interface) plus the pre-downsampling size, which
    the decoder later uses as ``output_size``.
    """

    def __init__(self, inp, out, ksize, pad, ds_ksize, ds_stride):
        super().__init__()
        self.conv1 = nn.Conv2d(inp, out, kernel_size=ksize, padding=pad)
        self.bn1 = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, padding=pad)
        self.bn2 = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
        self.skip = nn.Conv2d(inp, out, kernel_size=1, padding=0)
        self.ds = nn.Conv2d(out, out, kernel_size=ds_ksize, stride=ds_stride, padding=0)

    def forward(self, x):
        hidden = F.leaky_relu(self.bn1(self.conv1(x)))
        residual = F.leaky_relu(self.bn2(self.conv2(hidden)))
        # 1x1 projection of the input joins the conv path as a residual.
        residual = residual + self.skip(x)
        downsampled = self.ds(residual)
        return downsampled, downsampled, residual.size()
class d_block(nn.Module):
    """Decoder block mirroring ``block``.

    Upsamples with a transposed conv pinned to a target ``output_size``,
    optionally concatenates a skip tensor, then applies two transposed
    convolutions. The final decoder block (``isLast=True``) skips the
    batch-norm/activation on its output layer.
    """

    def __init__(self, inp, out, isLast, ksize, pad, ds_ksize, ds_stride):
        super().__init__()
        self.conv2d = nn.ConvTranspose2d(inp, int(inp/2), kernel_size=ksize, padding=pad)
        self.bn2d = nn.BatchNorm2d(int(inp/2), momentum=batchNorm_momentum)
        self.conv1d = nn.ConvTranspose2d(int(inp/2), out, kernel_size=ksize, padding=pad)
        if not isLast:
            self.bn1d = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
            # A skip tensor with (inp - out) channels is concatenated after upsampling.
            self.us = nn.ConvTranspose2d(inp-out, inp-out, kernel_size=ds_ksize, stride=ds_stride)
        else:
            self.us = nn.ConvTranspose2d(inp, inp, kernel_size=ds_ksize, stride=ds_stride)

    def forward(self, x, size=None, isLast=None, skip=None):
        # output_size pins the upsampled shape to the matching encoder feature map
        x = self.us(x, output_size=size)
        if not isLast:
            x = torch.cat((x, skip), 1)
        x = F.leaky_relu(self.bn2d(self.conv2d(x)))
        if isLast:
            return self.conv1d(x)
        return F.leaky_relu(self.bn1d(self.conv1d(x)))
class Encoder(nn.Module):
    """Four-stage downsampling encoder.

    Returns the deepest feature map, the per-stage sizes (for the decoder's
    ``output_size``), and refined intermediate maps used as skip connections.
    """

    def __init__(self, ds_ksize, ds_stride):
        super().__init__()
        self.block1 = block(1, 16, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block2 = block(16, 32, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block3 = block(32, 64, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block4 = block(64, 128, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.conv1 = nn.Conv2d(64, 64, kernel_size=(3, 3), padding=(1, 1))
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.conv3 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))

    def forward(self, x):
        out1, _, size1 = self.block1(x)
        out2, _, size2 = self.block2(out1)
        out3, _, size3 = self.block3(out2)
        out4, _, size4 = self.block4(out3)
        # Extra convolutions refine the skip features before the decoder consumes them.
        skip1 = self.conv1(out3)
        skip2 = self.conv2(out2)
        skip3 = self.conv3(out1)
        return out4, [size1, size2, size3, size4], [skip1, skip2, skip3, out1]
class Decoder(nn.Module):
    """Four-stage upsampling decoder.

    The final stage maps to ``num_instruments`` output channels and applies
    no sigmoid -- downstream heads add their own activations.
    """

    def __init__(self, ds_ksize, ds_stride, num_instruments):
        super().__init__()
        self.d_block1 = d_block(192, 64, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block2 = d_block(96, 32, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block3 = d_block(48, 16, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block4 = d_block(16, num_instruments, True, (3, 3), (1, 1), ds_ksize, ds_stride)

    def forward(self, x, s, c=[None, None, None, None]):
        # Walk back up through the encoder sizes, consuming skip features in reverse.
        x = self.d_block1(x, s[3], False, c[0])
        x = self.d_block2(x, s[2], False, c[1])
        x = self.d_block3(x, s[1], False, c[2])
        return self.d_block4(x, s[0], True, c[3])
class Stack(nn.Module):
    """Local multi-head attention followed by a linear projection and dropout."""

    def __init__(self, input_size, hidden_dim, attn_size=31, attn_group=4, output_dim=88, dropout=0.5):
        super().__init__()
        self.attention = MutliHeadAttention1D(input_size, hidden_dim, attn_size, position=True, groups=attn_group)
        self.linear = nn.Linear(hidden_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        features, attn_weights = self.attention(x)
        projected = self.dropout(self.linear(features))
        return projected, attn_weights
class Spec2Roll(nn.Module):
    """Spectrogram -> (piano roll, onset) transcriber (U-net 1)."""

    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet1_decoder = Decoder(ds_ksize, ds_stride, 2)
        # NOTE: lstm1 / linear1 / dropout_layer are not used by forward() but
        # are kept registered so existing checkpoints still load.
        self.lstm1 = MutliHeadAttention1D(N_BINS + 88, N_BINS * complexity, 31, position=True, groups=complexity)
        self.linear1 = nn.Linear(N_BINS * complexity, 88)
        self.linear_onset = nn.Linear(N_BINS, 88)
        self.linear_feature = nn.Linear(N_BINS, 88)
        self.dropout_layer = nn.Dropout(0.5)
        self.combine_stack = Stack(input_size=88 * 2, hidden_dim=768, attn_size=31, attn_group=6, output_dim=88, dropout=0)

    def forward(self, x):
        # U-net 1: spectrogram -> two feature channels (onset vs. frame features)
        latent, sizes, skips = self.Unet1_encoder(x)
        decoded = self.Unet1_decoder(latent, sizes, skips)
        onset = torch.sigmoid(self.linear_onset(decoded[:, 0]))
        feat = self.linear_feature(decoded[:, 1])
        # Fuse onset probabilities and frame features, then attend over time.
        combined, attn = self.combine_stack(torch.cat((onset, feat), -1))
        pianoroll = torch.sigmoid(combined)
        return pianoroll, onset, attn
class Roll2Spec(nn.Module):
    """Piano roll -> spectrogram reconstructor (U-net 2)."""

    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet2_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet2_decoder = Decoder(ds_ksize, ds_stride, 1)
        self.lstm2 = MutliHeadAttention1D(88, N_BINS * complexity, 31, position=True, groups=4)
        self.linear2 = nn.Linear(N_BINS * complexity, N_BINS)

    def forward(self, x):
        # Lift the 88-bin roll up to full spectrogram resolution first.
        lifted, attn = self.lstm2(x)
        spec_estimate = torch.sigmoid(self.linear2(lifted))
        # U-net 2 turns the coarse estimate into the reconstructed spectrogram.
        latent, sizes, skips = self.Unet2_encoder(spec_estimate.unsqueeze(1))
        reconstruction = self.Unet2_decoder(latent, sizes, skips)
        return reconstruction, attn
class UNet_Onset(nn.Module):
    """U-net transcription model with a dedicated onset head.

    Converts raw audio into a (CQT or Mel) spectrogram, normalizes it, and
    predicts frame-wise piano rolls plus onsets via ``Spec2Roll``. When
    ``reconstruction`` is enabled, the predicted roll is decoded back into a
    spectrogram (``Roll2Spec``) and re-transcribed for a consistency loss.
    VAT regularization is available through ``run_on_batch``.
    """
    def __init__(self, ds_ksize, ds_stride, log=True, reconstruction=True, mode='imagewise', spec='CQT', device='cpu', XI=1e-6, eps=1e-2):
        super().__init__()
        global N_BINS  # using the N_BINS parameter from constant.py

        # Selecting the type of spectrogram to use
        if spec == 'CQT':
            r = 2
            N_BINS = 88 * r
            self.spectrogram = Spectrogram.CQT1992v2(sr=SAMPLE_RATE, hop_length=HOP_LENGTH,
                                                     n_bins=N_BINS, fmin=27.5,
                                                     bins_per_octave=12 * r, trainable=False)
        elif spec == 'Mel':
            self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                          hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                          trainable_mel=False, trainable_STFT=False)
        else:
            print(f'Please select a correct spectrogram')

        self.log = log
        self.normalize = Normalization(mode)
        self.reconstruction = reconstruction
        # VAT module: n_power=1, BCE distance (KL_Div=False)
        self.vat_loss = UNet_VAT(XI, eps, 1, False)

        self.transcriber = Spec2Roll(ds_ksize, ds_stride)
        if reconstruction == True:
            self.reconstructor = Roll2Spec(ds_ksize, ds_stride)

    def forward(self, x):
        """Transcribe spectrogram x; additionally reconstruct + re-transcribe
        when the reconstruction branch is enabled."""
        # U-net 1
        pianoroll, onset, a = self.transcriber(x)
        if self.reconstruction:
            # U-net 2
            reconstruction, a_reconstruct = self.reconstructor(pianoroll)
            # Applying U-net 1 to the reconstructed spectrograms
            pianoroll2, onset2, a_2 = self.transcriber(reconstruction)
            return reconstruction, pianoroll, onset, pianoroll2, onset2, a
        else:
            return pianoroll, onset, a

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Compute predictions and losses for one (optionally semi-supervised) batch.

        Parameters
        ----------
        batch: dict with 'audio', 'onset', 'frame' tensors (labelled data).
        batch_ul: optional dict with unlabelled 'audio' for the VAT loss.
        VAT: if True, also compute the VAT loss on the labelled batch.

        Returns
        -------
        (predictions, losses, spectrogram) -- loss keys are prefixed
        'loss/train_*' in training mode and 'loss/test_*' in eval mode.
        """
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)

        if batch_ul:
            # Unlabelled branch: only the VAT smoothness loss is computed.
            audio_label_ul = batch_ul['audio']
            # NOTE(review): the reshape uses audio_label.shape[-1] (the labelled
            # length) rather than audio_label_ul.shape[-1] -- this assumes both
            # batches share the same sequence length; verify against the loaders.
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1])  # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1, -2).unsqueeze(1)
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = {'frame': torch.tensor(0.),
                      'onset': torch.tensor(0.)}
            r_norm_ul = torch.tensor(0.)

        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])  # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1, -2).unsqueeze(1)  # shape (8,1,640,229)

        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1)  # remove the channel dimension
        else:
            r_adv = None
            lds_l = {'frame': torch.tensor(0.),
                     'onset': torch.tensor(0.)}
            r_norm_l = torch.tensor(0.)

        # NOTE(review): frame_label was already unsqueezed at the top of this
        # method; this second check is redundant (but harmless).
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        if onset_label.dim() == 2:
            onset_label = onset_label.unsqueeze(0)

        if self.reconstruction:
            reconstrut, pianoroll, onset, pianoroll2, onset2, a = self(spec)
            if self.training:
                predictions = {
                    'frame': pianoroll,
                    'onset': onset,
                    'frame2': pianoroll2,
                    'onset2': onset2,
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    'loss/train_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/train_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/train_onset2': F.binary_cross_entropy(predictions['onset2'].squeeze(1), onset_label),
                    'loss/train_LDS_l_frame': lds_l['frame'],
                    'loss/train_LDS_l_onset': lds_l['onset'],
                    'loss/train_LDS_ul_frame': lds_ul['frame'],
                    'loss/train_LDS_ul_onset': lds_ul['onset'],
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    'frame': pianoroll.reshape(*frame_label.shape),
                    # NOTE(review): reshape(*onset.shape) is a no-op -- possibly
                    # onset_label.shape was intended here (and for 'onset2').
                    'onset': onset.reshape(*onset.shape),
                    'frame2': pianoroll2.reshape(*frame_label.shape),
                    'onset2': onset2.reshape(*onset.shape),
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    'loss/test_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/test_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/test_onset2': F.binary_cross_entropy(predictions['onset2'].squeeze(1), onset_label),
                    'loss/test_LDS_l_frame': lds_l['frame'],
                    'loss/test_LDS_l_onset': lds_l['onset'],
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }
            return predictions, losses, spec.squeeze(1)
        else:
            frame_pred, onset, a = self(spec)
            if self.training:
                predictions = {
                    'onset': onset,
                    'frame': frame_pred,
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/train_LDS_l_frame': lds_l['frame'],
                    'loss/train_LDS_l_onset': lds_l['onset'],
                    'loss/train_LDS_ul_frame': lds_ul['frame'],
                    'loss/train_LDS_ul_onset': lds_ul['onset'],
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    # NOTE(review): reshape(*onset.shape) is a no-op; verify intent.
                    'onset': onset.reshape(*onset.shape),
                    'frame': frame_pred.reshape(*frame_label.shape),
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/test_LDS_l_frame': lds_l['frame'],
                    'loss/test_LDS_l_onset': lds_l['onset'],
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }
            return predictions, losses, spec.squeeze(1)

    def load_my_state_dict(self, state_dict):
        """Load only the weights whose names exist in this model (partial load).

        Useful when loading part of the weights.
        From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2
        """
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameterds
                param = param.data
            own_state[name].copy_(param)
| 25,563 | 45.144404 | 196 | py |
ReconVAT | ReconVAT-master/model/Spectrogram.py | """
Module containing all the spectrogram classes
"""
# 0.2.0
import torch
import torch.nn as nn
from torch.nn.functional import conv1d, conv2d, fold
import scipy # used only in CFP
import numpy as np
from time import time
from nnAudio.librosa_functions import *
from nnAudio.utils import *
sz_float = 4 # size of a float
epsilon = 10e-8 # fudge factor for normalization
### --------------------------- Spectrogram Classes ---------------------------###
class STFT(torch.nn.Module):
    """This function is to calculate the short-time Fourier transform (STFT) of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    n_fft : int
        The window size. Default value is 2048.
    freq_bins : int
        Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins.
    hop_length : int
        The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
    window : str
        The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
    freq_scale : 'linear', 'log', or 'no'
        Determine the spacing between each frequency bin. When `linear` or `log` is used,
        the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
        start at 0Hz and end at Nyquist frequency with linear spacing.
    center : bool
        Putting the STFT keneral at the center of the time-step or not. If ``False``, the time
        index is the beginning of the STFT kernel, if ``True``, the time index is the center of
        the STFT kernel. Default value if ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    inverse : bool
        To activate the iSTFT module or not. By default, it is False to save GPU memory.
    fmin : int
        The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
        does nothing.
    fmax : int
        The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
        does nothing.
    sr : int
        The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    trainable : bool
        Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be caluclated and the STFT kernels will be updated during model training.
        Default value is ``False``
    output_format : str
        Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``.
        The output_format can also be changed during the ``forward`` method.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints
    device : str
        Choose which device to initialize this layer. Default value is 'cpu'

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms.
        ``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
        ``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;

    Examples
    --------
    >>> spec_layer = Spectrogram.STFT()
    >>> specs = spec_layer(x)
    """

    def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
                 freq_scale='no', center=True, pad_mode='reflect', iSTFT=False,
                 fmin=50, fmax=6000, sr=22050, trainable=False,
                 output_format="Complex", verbose=True):
        super().__init__()

        # Trying to make the default setting same as librosa
        if win_length == None: win_length = n_fft
        if hop_length == None: hop_length = int(win_length // 4)

        self.output_format = output_format
        self.trainable = trainable
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        self.freq_bins = freq_bins
        self.trainable = trainable
        self.pad_amount = self.n_fft // 2
        self.window = window
        self.win_length = win_length
        self.iSTFT = iSTFT
        # NOTE(review): self.trainable is assigned three times in this
        # constructor with the same value; harmless duplication.
        self.trainable = trainable
        start = time()

        # Create filter windows for stft
        kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft,
                                                                                                    win_length=win_length,
                                                                                                    freq_bins=freq_bins,
                                                                                                    window=window,
                                                                                                    freq_scale=freq_scale,
                                                                                                    fmin=fmin,
                                                                                                    fmax=fmax,
                                                                                                    sr=sr,
                                                                                                    verbose=verbose)

        kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)
        kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)

        # In this way, the inverse kernel and the forward kernel do not share the same memory...
        kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)
        kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)

        if iSTFT:
            self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1))
            self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1))

        # Applying window functions to the Fourier kernels
        window_mask = torch.tensor(window_mask)
        wsin = kernel_sin * window_mask
        wcos = kernel_cos * window_mask

        # Buffers for fixed kernels, Parameters when the kernels are trainable
        if self.trainable == False:
            self.register_buffer('wsin', wsin)
            self.register_buffer('wcos', wcos)

        if self.trainable == True:
            wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable)
            wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable)
            self.register_parameter('wsin', wsin)
            self.register_parameter('wcos', wcos)

        # Prepare the shape of window mask so that it can be used later in inverse
        self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1))

        if verbose == True:
            print("STFT kernels created, time used = {:.4f} seconds".format(time() - start))
        else:
            pass

    def forward(self, x, output_format=None):
        """
        Convert a batch of waveforms to spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape

        output_format : str
            Control the type of spectrogram to be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``.
            Default value is ``Complex``.
        """
        output_format = output_format or self.output_format
        self.num_samples = x.shape[-1]

        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.pad_amount, 0)
            elif self.pad_mode == 'reflect':
                if self.num_samples < self.pad_amount:
                    raise AssertionError("Signal length shorter than reflect padding length (n_fft // 2).")
                padding = nn.ReflectionPad1d(self.pad_amount)
            x = padding(x)
        # Doing STFT by using conv1d
        spec_imag = conv1d(x, self.wsin, stride=self.stride)
        spec_real = conv1d(x, self.wcos, stride=self.stride)

        # remove redundant parts
        spec_real = spec_real[:, :self.freq_bins, :]
        spec_imag = spec_imag[:, :self.freq_bins, :]

        if output_format == 'Magnitude':
            spec = spec_real.pow(2) + spec_imag.pow(2)
            if self.trainable == True:
                return torch.sqrt(spec + 1e-8)  # prevent Nan gradient when sqrt(0) due to output=0
            else:
                return torch.sqrt(spec)

        elif output_format == 'Complex':
            return torch.stack((spec_real, -spec_imag), -1)  # Remember the minus sign for imaginary part

        elif output_format == 'Phase':
            return torch.atan2(-spec_imag + 0.0, spec_real)  # +0.0 removes -0.0 elements, which leads to error in calculating phase

    def inverse(self, X, onesided=True, length=None, refresh_win=True):
        """
        This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class,
        which is to convert spectrograms back to waveforms.
        It only works for the complex value spectrograms. If you have the magnitude spectrograms,
        please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.

        Parameters
        ----------
        onesided : bool
            If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
            else use ``onesided=False``
        length : int
            To make sure the inverse STFT has the same output length of the original waveform, please
            set `length` as your intended waveform length. By default, ``length=None``,
            which will remove ``n_fft//2`` samples from the start and the end of the output.
        refresh_win : bool
            Recalculating the window sum square. If you have an input with fixed number of timesteps,
            you can increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True``
        """
        if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True):
            raise NameError("Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`")

        assert X.dim() == 4, "Inverse iSTFT only works for complex number," \
                             "make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)." \
                             "\nIf you have a magnitude spectrogram, please consider using Griffin-Lim."
        if onesided:
            X = extend_fbins(X)  # extend freq

        X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]

        # broadcast dimensions to support 2D convolution
        X_real_bc = X_real.unsqueeze(1)
        X_imag_bc = X_imag.unsqueeze(1)
        a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1, 1))
        b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1, 1))

        # compute real and imag part. signal lies in the real part
        real = a1 - b2
        real = real.squeeze(-2) * self.window_mask

        # Normalize the amplitude with n_fft
        real /= (self.n_fft)

        # Overlap and Add algorithm to connect all the frames
        real = overlap_add(real, self.stride)

        # Prepare the window sumsqure for division
        # Only need to create this window once to save time
        # Unless the input spectrograms have different time steps
        if hasattr(self, 'w_sum') == False or refresh_win == True:
            self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
            self.nonzero_indices = (self.w_sum > 1e-10)
        else:
            pass
        real[:, self.nonzero_indices] = real[:, self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
        # Remove padding
        if length is None:
            if self.center:
                real = real[:, self.pad_amount:-self.pad_amount]
        else:
            if self.center:
                real = real[:, self.pad_amount:self.pad_amount + length]
            else:
                real = real[:, :length]
        return real

    def extra_repr(self) -> str:
        """One-line module summary shown by print(model)."""
        return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(
            self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable
        )
class MelSpectrogram(torch.nn.Module):
"""This function is to calculate the Melspectrogram of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio.
It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_fft : int
The window size for the STFT. Default value is 2048
n_mels : int
The number of Mel filter banks. The filter banks maps the n_fft to mel bins.
Default value is 128.
hop_length : int
The hop (or stride) size. Default value is 512.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
Putting the STFT keneral at the center of the time-step or not. If ``False``,
the time index is the beginning of the STFT kernel, if ``True``, the time index is the
center of the STFT kernel. Default value if ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
htk : bool
When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the
Mel scale is logarithmic. The default value is ``False``.
fmin : int
The starting frequency for the lowest Mel filter bank.
fmax : int
The ending frequency for the highest Mel filter bank.
trainable_mel : bool
Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel
filter banks will also be calculated and the Mel filter banks will be updated during model
training. Default value is ``False``.
trainable_STFT : bool
Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT
kernels will also be caluclated and the STFT kernels will be updated during model training.
Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MelSpectrogram()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512,
window='hann', center=True, pad_mode='reflect', power=2.0, htk=False,
fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False,
verbose=True, **kwargs):
super().__init__()
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.power = power
self.trainable_mel = trainable_mel
self.trainable_STFT = trainable_STFT
# Preparing for the stft layer. No need for center
self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window,
freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT,
output_format="Magnitude", verbose=verbose, **kwargs)
# Create filter windows for stft
start = time()
# Creating kernel for mel spectrogram
start = time()
mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
mel_basis = torch.tensor(mel_basis)
if verbose==True:
print("STFT filter created, time used = {:.4f} seconds".format(time()-start))
print("Mel filter created, time used = {:.4f} seconds".format(time()-start))
else:
pass
if trainable_mel:
# Making everything nn.Parameter, so that this model can support nn.DataParallel
mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel)
self.register_parameter('mel_basis', mel_basis)
else:
self.register_buffer('mel_basis', mel_basis)
# if trainable_mel==True:
# self.mel_basis = torch.nn.Parameter(self.mel_basis)
# if trainable_STFT==True:
# self.wsin = torch.nn.Parameter(self.wsin)
# self.wcos = torch.nn.Parameter(self.wcos)
def forward(self, x):
"""
Convert a batch of waveforms to Mel spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = broadcast_dim(x)
spec = self.stft(x, output_format='Magnitude')**self.power
melspec = torch.matmul(self.mel_basis, spec)
return melspec
def extra_repr(self) -> str:
return 'Mel filter banks size = {}, trainable_mel={}'.format(
(*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT
)
class MFCC(torch.nn.Module):
    """This function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal.

    This algorithm first extracts Mel spectrograms from the audio clips,
    then the discrete cosine transform is calculated to obtain the final MFCCs.
    Therefore, the Mel spectrogram part can be made trainable using
    ``trainable_mel`` and ``trainable_STFT``.
    It only supports type-II DCT at the moment. Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``

    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    n_mfcc : int
        The number of Mel-frequency cepstral coefficients
    norm : string
        The default value is 'ortho'. Normalization for DCT basis
    **kwargs
        Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window

    Returns
    -------
    MFCCs : torch.tensor
        It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``.

    Examples
    --------
    >>> spec_layer = Spectrogram.MFCC()
    >>> mfcc = spec_layer(x)
    """

    def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs):
        super().__init__()
        self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs)
        self.m_mfcc = n_mfcc  # kept alongside ``n_mfcc`` below for backward compatibility

        # attributes that will be used for _power_to_db
        if amin <= 0:
            raise ParameterError('amin must be strictly positive')
        amin = torch.tensor([amin])
        ref = torch.abs(torch.tensor([ref]))
        # Registered as buffers so they follow the module across devices.
        self.register_buffer('amin', amin)
        self.register_buffer('ref', ref)
        self.top_db = top_db
        self.n_mfcc = n_mfcc

    def _power_to_db(self, S):
        '''
        Convert a power spectrogram to decibel units, clamped to ``top_db`` below
        the per-sample maximum.
        Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db
        for the original implementation.
        '''
        log_spec = 10.0 * torch.log10(torch.max(S, self.amin))
        log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref))
        if self.top_db is not None:
            if self.top_db < 0:
                raise ParameterError('top_db must be non-negative')

            # make the dim same as log_spec so that it can be broadcasted
            batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1)
            log_spec = torch.max(log_spec, batch_wise_max - self.top_db)

        return log_spec

    def _dct(self, x, norm=None):
        '''
        Type-II DCT along the frequency axis, computed via an FFT of the
        even/odd re-ordered signal.
        Refer to https://github.com/zh217/torch-dct for the original implementation.
        '''
        x = x.permute(0, 2, 1)  # make freq the last axis, since dct applies to the frequency axis
        x_shape = x.shape
        N = x_shape[-1]

        v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2)

        # ``torch.rfft`` was removed in PyTorch 1.8; ``torch.fft.fft`` followed by
        # ``view_as_real`` reproduces the old ``torch.rfft(v, 1, onesided=False)``
        # layout ``(..., N, 2)`` with real/imaginary parts stacked in the last axis.
        Vc = torch.view_as_real(torch.fft.fft(v, dim=2))

        # TODO: Can make the W_r and W_i trainable here
        k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
        W_r = torch.cos(k)
        W_i = torch.sin(k)

        V = Vc[:, :, :, 0] * W_r - Vc[:, :, :, 1] * W_i

        if norm == 'ortho':
            V[:, :, 0] /= np.sqrt(N) * 2
            V[:, :, 1:] /= np.sqrt(N / 2) * 2

        V = 2 * V

        return V.permute(0, 2, 1)  # swapping back the time axis and freq axis

    def forward(self, x):
        """
        Convert a batch of waveforms to MFCC.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        x = self.melspec_layer(x)
        x = self._power_to_db(x)
        x = self._dct(x, norm='ortho')[:, :self.m_mfcc, :]
        return x

    def extra_repr(self) -> str:
        return 'n_mfcc = {}'.format(
            (self.n_mfcc)
        )
class Gammatonegram(torch.nn.Module):
    """
    This function is to calculate the Gammatonegram of the input signal. Input signal should be in either of the following shapes. 1. ``(len_audio)``, 2. ``(num_audio, len_audio)``, 3. ``(num_audio, 1, len_audio)``. The correct shape will be inferred automatically if the input follows these 3 shapes. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
    n_fft : int
        The window size for the STFT. Default value is 2048
    n_bins : int
        The number of Gammatone filter banks. The filter banks map the n_fft bins to Gammatone bins. Default value is 64
    hop_length : int
        The hop (or stride) size. Default value is 512.
    window : str
        The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'
    center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``, the time index is the beginning of the STFT kernel, if ``True``, the time index is the center of the STFT kernel. Default value is ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    htk : bool
        When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the Mel scale is logarithmic. The default value is ``False``
    fmin : int
        The starting frequency for the lowest Gammatone filter bank
    fmax : int
        The ending frequency for the highest Gammatone filter bank
    trainable_bins : bool
        Determine if the Gammatone filter banks are trainable or not. If ``True``, the gradients for the filter banks will also be calculated and the filter banks will be updated during model training. Default value is ``False``
    trainable_STFT : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT kernels will also be calculated and the STFT kernels will be updated during model training. Default value is ``False``
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints
    device : str
        Choose which device to initialize this layer. Default value is 'cuda:0'
    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.
    Examples
    --------
    >>> spec_layer = Spectrogram.Gammatonegram()
    >>> specs = spec_layer(x)
    """
    def __init__(self, sr=44100, n_fft=2048, n_bins=64, hop_length=512, window='hann', center=True, pad_mode='reflect',
                 power=2.0, htk=False, fmin=20.0, fmax=None, norm=1, trainable_bins=False, trainable_STFT=False,
                 verbose=True, device='cuda:0'):
        # NOTE(review): ``device`` is stored on the instance but never used to place
        # tensors here; buffers/parameters move with ``.to(device)`` — confirm before relying on it.
        super(Gammatonegram, self).__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        self.device = device
        self.power = power
        # Create filter windows for stft
        start = time()
        wsin, wcos, self.bins2freq, _, _ = create_fourier_kernels(n_fft, freq_bins=None, window=window, freq_scale='no',
                                                                  sr=sr)
        wsin = torch.tensor(wsin, dtype=torch.float)
        wcos = torch.tensor(wcos, dtype=torch.float)
        if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
            self.register_parameter('wsin', wsin)
            self.register_parameter('wcos', wcos)
        else:
            self.register_buffer('wsin', wsin)
            self.register_buffer('wcos', wcos)
        # Creating kernel for Gammatone spectrogram
        # NOTE(review): ``start`` is re-assigned here, so both verbose messages below
        # report the Gammatone-kernel timing, not the STFT-kernel timing.
        start = time()
        gammatone_basis = gammatone(sr, n_fft, n_bins, fmin, fmax)
        gammatone_basis = torch.tensor(gammatone_basis)
        if verbose == True:
            print("STFT filter created, time used = {:.4f} seconds".format(time() - start))
            print("Gammatone filter created, time used = {:.4f} seconds".format(time() - start))
        else:
            pass
        # Making everything nn.Parameter, so that this model can support nn.DataParallel
        if trainable_bins:
            gammatone_basis = torch.nn.Parameter(gammatone_basis, requires_grad=trainable_bins)
            self.register_parameter('gammatone_basis', gammatone_basis)
        else:
            self.register_buffer('gammatone_basis', gammatone_basis)
        # if trainable_mel==True:
        #     self.mel_basis = torch.nn.Parameter(self.mel_basis)
        # if trainable_STFT==True:
        #     self.wsin = torch.nn.Parameter(self.wsin)
        #     self.wcos = torch.nn.Parameter(self.wcos)
    def forward(self, x):
        """
        Convert a batch of waveforms to Gammatonegrams.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft // 2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft // 2)
            x = padding(x)
        spec = torch.sqrt(conv1d(x, self.wsin, stride=self.stride).pow(2) \
                          + conv1d(x, self.wcos, stride=self.stride).pow(2)) ** self.power  # Doing STFT by using conv1d
        # Project the linear-frequency bins onto the Gammatone filter bank.
        gammatonespec = torch.matmul(self.gammatone_basis, spec)
        return gammatonespec
class CQT1992(torch.nn.Module):
    """
    This algorithm uses the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more
    computational and memory efficient version.
    [1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).
    This function is to calculate the CQT of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
    fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
        inferred from the ``n_bins`` and ``bins_per_octave``.
        If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
        will be calculated automatically. Default is ``None``
    n_bins : int
        The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
    bins_per_octave : int
        Number of bins per octave. Default is 12.
    trainable_STFT : bool
        Determine if the time to frequency domain transformation kernel for the input audio is trainable or not.
        Default is ``False``
    trainable_CQT : bool
        Determine if the frequency domain CQT kernel is trainable or not.
        Default is ``False``
    norm : int
        Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
        Default is ``1``, which is same as the normalization used in librosa.
    window : str
        The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
    center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
        Default value is ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    trainable : bool
        Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
        Default value is ``False``.
    output_format : str
        Determine the return type.
        ``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
        ``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
        ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
        The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints
    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms.
        shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
        shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
    Examples
    --------
    >>> spec_layer = Spectrogram.CQT1992()
    >>> specs = spec_layer(x)
    """
    def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84,
                 trainable_STFT=False, trainable_CQT=False, bins_per_octave=12,
                 output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'):
        super().__init__()
        # norm arg is not functioning
        self.hop_length = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.norm = norm
        self.output_format = output_format
        # creating kernels for CQT
        Q = 1/(2**(1/bins_per_octave)-1)
        print("Creating CQT kernels ...", end='\r')
        start = time()
        cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
                                                                     sr,
                                                                     fmin,
                                                                     n_bins,
                                                                     bins_per_octave,
                                                                     norm,
                                                                     window,
                                                                     fmax)
        # Per-bin kernel lengths, used later to normalize the CQT output.
        self.register_buffer('lenghts', lenghts)
        # Keep only the non-negative-frequency half of the kernel spectrum.
        cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1]
        print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
        # creating kernels for stft
        # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa
        # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width
        print("Creating STFT kernels ...", end='\r')
        start = time()
        kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width,
                                                                                   window='ones',
                                                                                   freq_scale='no')
        # Converting kernels from numpy arrays to torch tensors
        wsin = torch.tensor(kernel_sin * window)
        wcos = torch.tensor(kernel_cos * window)
        cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32))
        cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32))
        if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
            self.register_parameter('wsin', wsin)
            self.register_parameter('wcos', wcos)
        else:
            self.register_buffer('wsin', wsin)
            self.register_buffer('wcos', wcos)
        if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
            self.register_parameter('cqt_kernels_real', cqt_kernels_real)
            self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
        else:
            self.register_buffer('cqt_kernels_real', cqt_kernels_real)
            self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
        print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
    def forward(self, x, output_format=None):
        """
        Convert a batch of waveforms to CQT spectrograms.
        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        output_format = output_format or self.output_format
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.kernel_width//2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.kernel_width//2)
            x = padding(x)
        # STFT
        fourier_real = conv1d(x, self.wcos, stride=self.hop_length)
        fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)
        # CQT: complex multiplication of the STFT result with the frequency-domain kernels
        CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),
                                         (fourier_real, fourier_imag))
        CQT = torch.stack((CQT_real,-CQT_imag),-1)
        if self.norm:
            CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1))
        else:
            CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
        if output_format=='Magnitude':
            # Getting CQT Amplitude
            return torch.sqrt(CQT.pow(2).sum(-1))
        elif output_format=='Complex':
            return CQT
        elif output_format=='Phase':
            phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
            phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
            return torch.stack((phase_real,phase_imag), -1)
    def extra_repr(self) -> str:
        return 'STFT kernel size = {}, CQT kernel size = {}'.format(
            (*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
        )
class CQT2010(torch.nn.Module):
    """
    Constant-Q transform based on the resampling method proposed in [1].

    Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, a small CQT kernel covering only the top octave is created. The input audio is then
    repeatedly downsampled by a factor of 2 and convolved with that small kernel; every time the
    audio is downsampled, the CQT relative to the downsampled input is equivalent to the next
    lower octave.

    The kernel creation process is the same as in the 1992 algorithm, so the code from the 1992
    algorithm [2] is reused.

    [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
    [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).

    The early downsampling factor downsamples the input audio in order to reduce the CQT kernel
    size. The results with and without early downsampling are more or less the same except in the
    very low frequency region where freq < 40Hz.
    """

    def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,
                 norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False,
                 trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True):
        """Create the lowpass filter, the top-octave CQT kernels and the STFT kernels.

        Arguments follow the convention of :class:`CQT1992`; ``basis_norm`` normalizes
        the CQT basis while ``norm`` normalizes the final CQT result.
        """
        super().__init__()

        self.norm = norm  # Now norm is used to normalize the final CQT result by dividing n_fft
        # basis_norm is for normalizing basis
        self.hop_length = hop_length
        self.pad_mode = pad_mode
        self.n_bins = n_bins
        self.output_format = output_format
        self.earlydownsample = earlydownsample  # TODO: activate early downsampling later if possible

        # This will be used to calculate filter_cutoff and creating CQT kernels
        Q = 1/(2**(1/bins_per_octave)-1)

        # Creating lowpass filter and make it a torch tensor
        if verbose==True:
            print("Creating low pass filter ...", end='\r')
        start = time()
        lowpass_filter = torch.tensor(create_lowpass_filter(
                                                            band_center = 0.5,
                                                            kernelLength=256,
                                                            transitionBandwidth=0.001
                                                            )
                                      )

        # Broadcast the tensor to the shape that fits conv1d
        self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])

        if verbose==True:
            print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))

        # Calculate num of filter requires for the kernel
        # n_octaves determines how many resampling requires for the CQT
        n_filters = min(bins_per_octave, n_bins)
        self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))

        # Calculate the lowest frequency bin for the top octave kernel
        self.fmin_t = fmin*2**(self.n_octaves-1)
        remainder = n_bins % bins_per_octave

        if remainder==0:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
        else:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)

        self.fmin_t = fmax_t/2**(1-1/bins_per_octave)  # Adjusting the top minium bins
        if fmax_t > sr/2:
            raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
                              please reduce the n_bins'.format(fmax_t))

        if self.earlydownsample == True:  # Do early downsampling if this argument is True
            if verbose==True:
                print("Creating early downsampling filter ...", end='\r')
            start = time()
            sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
                self.earlydownsample = get_early_downsample_params(sr,
                                                                   hop_length,
                                                                   fmax_t,
                                                                   Q,
                                                                   self.n_octaves,
                                                                   verbose)
            self.register_buffer('early_downsample_filter', early_downsample_filter)

            if verbose==True:
                print("Early downsampling filter created, \
                        time used = {:.4f} seconds".format(time()-start))
        else:
            self.downsample_factor=1.

        # Preparing CQT kernels
        if verbose==True:
            print("Creating CQT kernels ...", end='\r')
        start = time()
        basis, self.n_fft, _ = create_cqt_kernels(Q,
                                                  sr,
                                                  self.fmin_t,
                                                  n_filters,
                                                  bins_per_octave,
                                                  norm=basis_norm,
                                                  topbin_check=False)

        # This is for the normalization in the end.
        # NOTE: the deprecated ``np.float`` alias (removed in NumPy 1.20) was
        # replaced by the builtin ``float``, which is the documented equivalent.
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
        lenghts = np.ceil(Q * sr / freqs)
        lenghts = torch.tensor(lenghts).float()
        self.register_buffer('lenghts', lenghts)

        self.basis=basis
        fft_basis = fft(basis)[:,:self.n_fft//2+1]  # Convert CQT kernel from time domain to freq domain

        # These cqt_kernel is already in the frequency domain
        cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32))
        cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32))

        if verbose==True:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))

        # Preparing kernels for Short-Time Fourier Transform (STFT)
        # We set the frequency range in the CQT filter instead of here.
        if verbose==True:
            print("Creating STFT kernels ...", end='\r')
        start = time()
        kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')
        wsin = kernel_sin * window
        wcos = kernel_cos * window

        wsin = torch.tensor(wsin)
        wcos = torch.tensor(wcos)

        if verbose==True:
            print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))

        if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
            self.register_parameter('wsin', wsin)
            self.register_parameter('wcos', wcos)
        else:
            self.register_buffer('wsin', wsin)
            self.register_buffer('wcos', wcos)

        if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
            self.register_parameter('cqt_kernels_real', cqt_kernels_real)
            self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
        else:
            self.register_buffer('cqt_kernels_real', cqt_kernels_real)
            self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)

        # If center==True, the STFT window will be put in the middle, and paddings at the beginning
        # and ending are required.
        if self.pad_mode == 'constant':
            self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
        elif self.pad_mode == 'reflect':
            self.padding = nn.ReflectionPad1d(self.n_fft//2)

    def forward(self, x, output_format=None):
        """
        Convert a batch of waveforms to CQT spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        output_format = output_format or self.output_format

        x = broadcast_dim(x)
        if self.earlydownsample==True:
            x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
        hop = self.hop_length

        CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding)  # Getting the top octave CQT

        x_down = x  # Preparing a new variable for downsampling
        for i in range(self.n_octaves-1):
            hop = hop//2
            x_down = downsampling_by_2(x_down, self.lowpass_filter)
            CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding)
            CQT = torch.cat((CQT1, CQT),1)

        CQT = CQT[:,-self.n_bins:,:]  # Removing unwanted top bins

        if self.norm:
            CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1))
        else:
            CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))

        # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1)
        # is make it same mag as 1992
        CQT = CQT*self.downsample_factor

        if output_format=='Magnitude':
            # Getting CQT Amplitude
            return torch.sqrt(CQT.pow(2).sum(-1))

        elif output_format=='Complex':
            return CQT

        elif output_format=='Phase':
            phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            return torch.stack((phase_real,phase_imag), -1)

    def extra_repr(self) -> str:
        return 'STFT kernel size = {}, CQT kernel size = {}'.format(
            (*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
        )
class CQT1992v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred autommatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
This alogrithm uses the method proposed in [1]. I slightly modify it so that it runs faster
than the original 1992 algorithm, that is why I call it version 2.
[1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
The frequency for the lowest CQT bin. Default is 32.70Hz, which coresponds to the note C0.
fmax : float
The frequency for the highest CQT bin. Default is ``None``, therefore the higest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
Putting the CQT keneral at the center of the time-step or not. If ``False``, the time index is
the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
Default value if ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
will also be caluclated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
``Phase`` will return the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT1992v2()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect',
trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
# norm arg is not functioning
self.trainable = trainable
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)
cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)
if trainable:
cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
if output_format=='Magnitude':
if self.trainable==False:
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
else:
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8)
return CQT
elif output_format=='Complex':
return torch.stack((CQT_real,CQT_imag),-1)
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
return torch.stack((phase_real,phase_imag), -1)
def forward_manual(self, x):
    """
    Debugging helper: magnitude CQT computed step by step, applying the
    sqrt(N_k) normalization once at the end instead of per conv output.
    """
    x = broadcast_dim(x)

    # Same centering/padding behavior as forward().
    if self.center:
        if self.pad_mode == 'constant':
            padder = nn.ConstantPad1d(self.kernel_width // 2, 0)
        elif self.pad_mode == 'reflect':
            padder = nn.ReflectionPad1d(self.kernel_width // 2)
        x = padder(x)

    # Correlate with the real and imaginary kernel banks separately.
    real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
    imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)

    magnitude = torch.sqrt(real.pow(2) + imag.pow(2))
    # Rescale every bin by sqrt(N_k) to match librosa's normalization.
    return magnitude * torch.sqrt(self.lenghts.view(-1, 1))
class CQT2010v2(torch.nn.Module):
    """This function is to calculate the CQT of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``

    The correct shape will be inferred autommatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    This alogrithm uses the resampling method proposed in [1].
    Instead of convoluting the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the
    input audio by a factor of 2 to convoluting it with the small CQT kernel.
    Everytime the input audio is downsampled, the CQT relative to the downsampled input is equivalent
    to the next lower octave.
    The kernel creation process is still same as the 1992 algorithm. Therefore, we can reuse the
    code from the 1992 alogrithm [2]

    [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
    [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).

    Early downsampling factor is to downsample the input audio to reduce the CQT kernel size.
    The result with and without early downsampling are more or less the same except in the very low
    frequency region where freq < 40Hz.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which coresponds to the note C0.
    fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the higest CQT bin is
        inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the
        argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically.
        Default is ``None``
    n_bins : int
        The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
    bins_per_octave : int
        Number of bins per octave. Default is 12.
    norm : bool
        Normalization for the CQT result.
    basis_norm : int
        Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
        Default is ``1``, which is same as the normalization used in librosa.
    window : str
        The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'
    pad_mode : str
        The padding method. Default value is 'reflect'.
    trainable : bool
        Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be caluclated and the CQT kernels will be updated during model training.
        Default value is ``False``
    output_format : str
        Determine the return type.
        'Magnitude' will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``;
        'Complex' will return the STFT result in complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``;
        'Phase' will return the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``.
        The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints.

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms.
        shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
        shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;

    Examples
    --------
    >>> spec_layer = Spectrogram.CQT2010v2()
    >>> specs = spec_layer(x)
    """

    # To DO:
    # need to deal with the filter and other tensors

    def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
                 bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect',
                 earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True):

        super().__init__()

        self.norm = norm  # Now norm is used to normalize the final CQT result by dividing n_fft
        # basis_norm is for normalizing basis
        self.hop_length = hop_length
        self.pad_mode = pad_mode
        self.n_bins = n_bins
        self.earlydownsample = earlydownsample  # We will activate early downsampling later if possible
        self.trainable = trainable
        self.output_format = output_format

        # It will be used to calculate filter_cutoff and creating CQT kernels
        Q = 1/(2**(1/bins_per_octave)-1)

        # Creating lowpass filter and make it a torch tensor
        if verbose==True:
            print("Creating low pass filter ...", end='\r')
        start = time()
        lowpass_filter = torch.tensor(create_lowpass_filter(
                                                            band_center = 0.50,
                                                            kernelLength=256,
                                                            transitionBandwidth=0.001)
                                     )

        # Broadcast the tensor to the shape that fits conv1d
        self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
        if verbose==True:
            print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))

        # Caluate num of filter requires for the kernel
        # n_octaves determines how many resampling requires for the CQT
        n_filters = min(bins_per_octave, n_bins)
        self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
        if verbose==True:
            print("num_octave = ", self.n_octaves)

        # Calculate the lowest frequency bin for the top octave kernel
        self.fmin_t = fmin*2**(self.n_octaves-1)
        remainder = n_bins % bins_per_octave

        if remainder==0:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
        else:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)

        self.fmin_t = fmax_t/2**(1-1/bins_per_octave)  # Adjusting the top minium bins
        if fmax_t > sr/2:
            raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
                              please reduce the n_bins'.format(fmax_t))

        if self.earlydownsample == True:  # Do early downsampling if this argument is True
            if verbose==True:
                print("Creating early downsampling filter ...", end='\r')
            start = time()
            sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
                self.earlydownsample = get_early_downsample_params(sr,
                                                                   hop_length,
                                                                   fmax_t,
                                                                   Q,
                                                                   self.n_octaves,
                                                                   verbose)
            self.register_buffer('early_downsample_filter', early_downsample_filter)
            if verbose==True:
                print("Early downsampling filter created, \
                        time used = {:.4f} seconds".format(time()-start))
        else:
            self.downsample_factor=1.

        # Preparing CQT kernels
        if verbose==True:
            print("Creating CQT kernels ...", end='\r')
        start = time()
        basis, self.n_fft, lenghts = create_cqt_kernels(Q,
                                                        sr,
                                                        self.fmin_t,
                                                        n_filters,
                                                        bins_per_octave,
                                                        norm=basis_norm,
                                                        topbin_check=False)

        # For normalization in the end
        # BUGFIX: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``float`` is the documented replacement and is numerically identical.
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
        lenghts = np.ceil(Q * sr / freqs)
        lenghts = torch.tensor(lenghts).float()
        self.register_buffer('lenghts', lenghts)

        self.basis = basis
        # These cqt_kernel is already in the frequency domain
        cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1)
        cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1)

        if trainable:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
            self.register_parameter('cqt_kernels_real', cqt_kernels_real)
            self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
        else:
            self.register_buffer('cqt_kernels_real', cqt_kernels_real)
            self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)

        if verbose==True:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
        # print("Getting cqt kernel done, n_fft = ",self.n_fft)

        # If center==True, the STFT window will be put in the middle, and paddings at the beginning
        # and ending are required.
        if self.pad_mode == 'constant':
            self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
        elif self.pad_mode == 'reflect':
            self.padding = nn.ReflectionPad1d(self.n_fft//2)

    def forward(self, x, output_format=None):
        """
        Convert a batch of waveforms to CQT spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        output_format = output_format or self.output_format

        x = broadcast_dim(x)
        if self.earlydownsample==True:
            x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
        hop = self.hop_length
        CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)  # Getting the top octave CQT

        x_down = x  # Preparing a new variable for downsampling

        # Each halving of the sample rate shifts the same kernel bank one octave down.
        for i in range(self.n_octaves-1):
            hop = hop//2
            x_down = downsampling_by_2(x_down, self.lowpass_filter)
            CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)
            CQT = torch.cat((CQT1, CQT),1)

        CQT = CQT[:,-self.n_bins:,:]  # Removing unwanted bottom bins

        # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) is make it
        # same mag as 1992
        CQT = CQT*self.downsample_factor
        # Normalize again to get same result as librosa
        CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))

        if output_format=='Magnitude':
            if self.trainable==False:
                # Getting CQT Amplitude
                return torch.sqrt(CQT.pow(2).sum(-1))
            else:
                # Epsilon keeps sqrt differentiable at zero when the kernels are trainable.
                return torch.sqrt(CQT.pow(2).sum(-1)+1e-8)

        elif output_format=='Complex':
            return CQT

        elif output_format=='Phase':
            phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            return torch.stack((phase_real,phase_imag), -1)
class CQT(CQT1992v2):
    """An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation"""
    # Pure alias: adds no behavior of its own on top of CQT1992v2.
    pass
# The section below is for developing purpose
# Please don't use the following classes
#
class DFT(torch.nn.Module):
    """
    Experimental feature before `torch.fft` was made available.

    Computes a full (two-sided) DFT by 1D-convolving the input with precomputed
    sine/cosine kernels. The inverse function only works for 1 single frame,
    i.e. input shape = (batch, n_fft, 1).
    """

    def __init__(self, n_fft=2048, freq_bins=None, hop_length=512,
                 window='hann', freq_scale='no', center=True, pad_mode='reflect',
                 fmin=50, fmax=6000, sr=22050):

        super().__init__()

        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft

        # Create filter windows for stft.
        # NOTE(review): freq_bins is forced to n_fft here (full two-sided
        # spectrum), so the `freq_bins` constructor argument is ignored — confirm intended.
        wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft,
                                                            freq_bins=n_fft,
                                                            window=window,
                                                            freq_scale=freq_scale,
                                                            fmin=fmin,
                                                            fmax=fmax,
                                                            sr=sr)

        # NOTE(review): stored as plain tensors, not buffers/parameters, so they
        # will NOT follow the module across .to(device)/.cuda() — confirm CPU-only usage.
        self.wsin = torch.tensor(wsin, dtype=torch.float)
        self.wcos = torch.tensor(wcos, dtype=torch.float)

    def forward(self,x):
        """
        Convert a batch of waveforms to spectrums.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        x = broadcast_dim(x)
        if self.center:
            # Half-window padding so each frame is centered on its time index.
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft//2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft//2)

            x = padding(x)

        imag = conv1d(x, self.wsin, stride=self.stride)
        real = conv1d(x, self.wcos, stride=self.stride)
        # Forward DFT convention e^{-j2πkn/N}: the sine correlation enters negated.
        return (real, -imag)

    def inverse(self,x_real,x_imag):
        """
        Convert a batch of waveforms to CQT spectrograms.

        Parameters
        ----------
        x_real : torch tensor
            Real part of the signal.
        x_imag : torch tensor
            Imaginary part of the signal.
        """
        x_real = broadcast_dim(x_real)
        x_imag = broadcast_dim(x_imag)

        x_real.transpose_(1,2)  # Prepare the right shape to do inverse
        x_imag.transpose_(1,2)  # Prepare the right shape to do inverse

        # Watch out for the positive and negative signs
        # ifft = e^(+2\pi*j)*X

        # ifft(X_real) = (a1, a2)

        # ifft(X_imag)*1j = (b1, b2)*1j
        #                = (-b2, b1)

        a1 = conv1d(x_real, self.wcos, stride=self.stride)
        a2 = conv1d(x_real, self.wsin, stride=self.stride)
        b1 = conv1d(x_imag, self.wcos, stride=self.stride)
        b2 = conv1d(x_imag, self.wsin, stride=self.stride)

        imag = a2+b1
        real = a1-b2
        # Standard 1/N scaling of the inverse DFT.
        return (real/self.n_fft, imag/self.n_fft)
class iSTFT(torch.nn.Module):
    """This class is to convert spectrograms back to waveforms. It only works for the complex value spectrograms.
    If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
    The parameters (e.g. n_fft, window) need to be the same as the STFT in order to obtain the correct inverse.
    If trainability is not required, it is recommended to use the ``inverse`` method under the ``STFT`` class
    to save GPU/RAM memory.

    When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse is perfect, please
    use with extra care.

    Parameters
    ----------
    n_fft : int
        The window size. Default value is 2048.
    freq_bins : int
        Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
        Please make sure the value is the same as the forward STFT.
    hop_length : int
        The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
        Please make sure the value is the same as the forward STFT.
    window : str
        The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
        Please make sure the value is the same as the forward STFT.
    freq_scale : 'linear', 'log', or 'no'
        Determine the spacing between each frequency bin. When `linear` or `log` is used,
        the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
        start at 0Hz and end at Nyquist frequency with linear spacing.
        Please make sure the value is the same as the forward STFT.
    center : bool
        Putting the iSTFT keneral at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value if ``True``.
        Please make sure the value is the same as the forward STFT.
    fmin : int
        The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
        does nothing. Please make sure the value is the same as the forward STFT.
    fmax : int
        The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
        does nothing. Please make sure the value is the same as the forward STFT.
    sr : int
        The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    trainable_kernels : bool
        Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be caluclated and the STFT kernels will be updated during model training.
        Default value is ``False``.
    trainable_window : bool
        Determine if the window function is trainable or not.
        Default value is ``False``.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints.

    Returns
    -------
    spectrogram : torch.tensor
        It returns a batch of waveforms.

    Examples
    --------
    >>> spec_layer = Spectrogram.iSTFT()
    >>> specs = spec_layer(x)
    """

    def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
                 freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False,
                 trainable_window=False, verbose=True, refresh_win=True):

        super().__init__()

        # Trying to make the default setting same as librosa
        if win_length==None: win_length = n_fft
        if hop_length==None: hop_length = int(win_length // 4)

        self.n_fft = n_fft
        self.win_length = win_length
        self.stride = hop_length
        self.center = center

        self.pad_amount = self.n_fft // 2
        self.refresh_win = refresh_win

        start = time()

        # Create the window function and prepare the shape for batch-wise-time-wise multiplication

        # Create filter windows for inverse
        kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft,
                                                                           win_length=win_length,
                                                                           freq_bins=n_fft,
                                                                           window=window,
                                                                           freq_scale=freq_scale,
                                                                           fmin=fmin,
                                                                           fmax=fmax,
                                                                           sr=sr,
                                                                           verbose=False)
        # NOTE(review): window_mask returned above is immediately overwritten by a
        # fresh get_window() result — confirm this is deliberate.
        window_mask = get_window(window,int(win_length), fftbins=True)

        # For inverse, the Fourier kernels do not need to be windowed
        window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1)

        # kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support 2D Conv
        kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1)
        kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1)

        # Decide if the Fourier kernels are trainable
        if trainable_kernels:
            # Making all these variables trainable
            kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels)
            kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels)
            self.register_parameter('kernel_sin', kernel_sin)
            self.register_parameter('kernel_cos', kernel_cos)

        else:
            self.register_buffer('kernel_sin', kernel_sin)
            self.register_buffer('kernel_cos', kernel_cos)

        # Decide if the window function is trainable
        if trainable_window:
            window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window)
            self.register_parameter('window_mask', window_mask)
        else:
            self.register_buffer('window_mask', window_mask)

        if verbose==True:
            print("iSTFT kernels created, time used = {:.4f} seconds".format(time()-start))
        else:
            pass

    def forward(self, X, onesided=False, length=None, refresh_win=None):
        """
        If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
        else use ``onesided=False``

        To make sure the inverse STFT has the same output length of the original waveform, please
        set `length` as your intended waveform length. By default, ``length=None``,
        which will remove ``n_fft//2`` samples from the start and the end of the output.

        If your input spectrograms X are of the same length, please use ``refresh_win=None`` to increase
        computational speed.
        """
        if refresh_win==None:
            refresh_win=self.refresh_win

        assert X.dim()==4 , "Inverse iSTFT only works for complex number," \
                            "make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)"

        # If the input spectrogram contains only half of the n_fft
        # Use extend_fbins function to get back another half
        if onesided:
            X = extend_fbins(X)  # extend freq

        X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]

        # broadcast dimensions to support 2D convolution
        X_real_bc = X_real.unsqueeze(1)
        X_imag_bc = X_imag.unsqueeze(1)

        a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1))
        b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1))

        # compute real and imag part. signal lies in the real part
        real = a1 - b2
        real = real.squeeze(-2)*self.window_mask

        # Normalize the amplitude with n_fft
        real /= (self.n_fft)

        # Overlap and Add algorithm to connect all the frames
        real = overlap_add(real, self.stride)

        # Prepare the window sumsqure for division
        # Only need to create this window once to save time
        # Unless the input spectrograms have different time steps
        if hasattr(self, 'w_sum')==False or refresh_win==True:
            self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
            self.nonzero_indices = (self.w_sum>1e-10)
        else:
            pass
        # Division by the squared-window overlap undoes the analysis windowing
        # (the standard NOLA/least-squares iSTFT normalization).
        real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
        # Remove padding
        if length is None:
            if self.center:
                real = real[:, self.pad_amount:-self.pad_amount]

        else:
            if self.center:
                real = real[:, self.pad_amount:self.pad_amount + length]
            else:
                real = real[:, :length]

        return real
class Griffin_Lim(torch.nn.Module):
    """
    Converting Magnitude spectrograms back to waveforms based on the "fast Griffin-Lim"[1].
    This Griffin Lim is a direct clone from librosa.griffinlim.

    [1] Perraudin, N., Balazs, P., & Søndergaard, P. L. “A fast Griffin-Lim algorithm,”
    IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013.

    Parameters
    ----------
    n_fft : int
        The window size. Default value is 2048.
    n_iter=32 : int
        The number of iterations for Griffin-Lim. The default value is ``32``
    hop_length : int
        The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
        Please make sure the value is the same as the forward STFT.
    window : str
        The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
        Please make sure the value is the same as the forward STFT.
    center : bool
        Putting the iSTFT keneral at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value if ``True``.
        Please make sure the value is the same as the forward STFT.
    momentum : float
        The momentum for the update rule. The default value is ``0.99``.
    device : str
        Choose which device to initialize this layer. Default value is 'cpu'
    """

    def __init__(self,
                 n_fft,
                 n_iter=32,
                 hop_length=None,
                 win_length=None,
                 window='hann',
                 center=True,
                 pad_mode='reflect',
                 momentum=0.99,
                 device='cpu'):
        super().__init__()

        self.n_fft = n_fft
        self.win_length = win_length
        self.n_iter = n_iter
        self.center = center
        self.pad_mode = pad_mode
        self.momentum = momentum
        # NOTE(review): the device is frozen at construction time; moving the
        # module with .to(device) later will not update it — confirm acceptable.
        self.device = device
        if win_length==None:
            self.win_length=n_fft
        else:
            self.win_length=win_length
        if hop_length==None:
            self.hop_length = n_fft//4
        else:
            self.hop_length = hop_length

        # Creating window function for stft and istft later
        self.w = torch.tensor(get_window(window,
                                         int(self.win_length),
                                         fftbins=True),
                              device=device).float()

    def forward(self, S):
        """
        Convert a batch of magnitude spectrograms to waveforms.

        Parameters
        ----------
        S : torch tensor
            Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)``
        """

        assert S.dim()==3 , "Please make sure your input is in the shape of (batch, freq_bins, timesteps)"

        # Initializing Random Phase: unit-magnitude complex numbers stored as
        # (real, imag) in the last axis.
        rand_phase = torch.randn(*S.shape, device=self.device)
        angles = torch.empty((*S.shape,2), device=self.device)
        angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase)
        angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase)

        # Initializing the rebuilt magnitude spectrogram
        rebuilt = torch.zeros(*angles.shape, device=self.device)

        # NOTE(review): torch.stft/istft signatures changed in newer PyTorch
        # (return_complex requirement); confirm the torch version this targets.
        for _ in range(self.n_iter):
            tprev = rebuilt  # Saving previous rebuilt magnitude spec

            # spec2wav conversion
#             print(f'win_length={self.win_length}\tw={self.w.shape}')
            inverse = torch.istft(S.unsqueeze(-1) * angles,
                                  self.n_fft,
                                  self.hop_length,
                                  win_length=self.win_length,
                                  window=self.w,
                                  center=self.center)
            # wav2spec conversion
            rebuilt = torch.stft(inverse,
                                 self.n_fft,
                                 self.hop_length,
                                 win_length=self.win_length,
                                 window=self.w,
                                 pad_mode=self.pad_mode)

            # Phase update rule: "fast" Griffin-Lim momentum step on the
            # previous estimate before re-normalizing the phase.
            angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:]

            # Phase normalization
            angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16)  # normalizing the phase

        # Using the final phase to reconstruct the waveforms
        inverse = torch.istft(S.unsqueeze(-1) * angles,
                              self.n_fft,
                              self.hop_length,
                              win_length=self.win_length,
                              window=self.w,
                              center=self.center)
        return inverse
class Combined_Frequency_Periodicity(nn.Module):
    """
    Vectorized version of the code in
    https://github.com/leo-so/VocalMelodyExtPatchCNN/blob/master/MelodyExt.py.
    This feature is described in 'Combining Spectral and Temporal Representations for
    Multipitch Estimation of Polyphonic Music'
    https://ieeexplore.ieee.org/document/7118691

    Under development, please report any bugs you found

    Parameters
    ----------
    fr : float
        Frequency resolution (Hz per STFT bin); the implied FFT size is ``fs/fr``.
    fs : int
        Sampling rate of the input audio.
    hop_length : int
        Hop size between frames in samples.
    window_size : int
        Window length for the STFT (Blackman-Harris window).
    fc : float
        Lowest frequency (Hz) kept in the combined representation.
    tc : float
        Shortest period (seconds) kept; ``1/tc`` is the highest retained frequency.
    g : sequence of float
        Power-law (gamma) exponents applied at each CFP layer.
    NumPerOct : int
        Number of log-frequency bins per octave in the output.
    """
    def __init__(self, fr=2, fs=16000, hop_length=320,
                 window_size=2049, fc=80, tc=1/1000,
                 g=(0.24, 0.6, 1), NumPerOct=48):
        # NOTE: default for ``g`` changed from a list to an equivalent tuple to
        # avoid the mutable-default-argument pitfall; callers may still pass lists.
        super().__init__()
        self.window_size = window_size
        self.hop_length = hop_length

        # variables for STFT part
        self.N = int(fs/float(fr))  # Will be used to calculate padding
        self.f = fs*np.linspace(0, 0.5, np.round(self.N//2), endpoint=True)  # it won't be used but will be returned
        self.pad_value = ((self.N-window_size))
        # Create window function, always blackmanharris?
        h = scipy.signal.blackmanharris(window_size).astype(np.float32)  # window function for STFT
        self.register_buffer('h',torch.tensor(h))

        # variables for CFP
        self.NumofLayer = np.size(g)
        self.g = g
        self.tc_idx = round(fs*tc)  # index to filter out top tc_idx and bottom tc_idx bins
        self.fc_idx = round(fc/fr)  # index to filter out top fc_idx and bottom fc_idx bins
        self.HighFreqIdx = int(round((1/tc)/fr)+1)
        self.HighQuefIdx = int(round(fs/fc)+1)

        # attributes to be returned
        self.f = self.f[:self.HighFreqIdx]
        self.q = np.arange(self.HighQuefIdx)/float(fs)

        # filters for the final step: map linear frequency/quefrency bins onto
        # a log-frequency axis with NumPerOct bins per octave
        freq2logfreq_matrix, quef2logfreq_matrix = self.create_logfreq_matrix(self.f, self.q, fr, fc, tc, NumPerOct, fs)
        self.register_buffer('freq2logfreq_matrix',torch.tensor(freq2logfreq_matrix.astype(np.float32)))
        self.register_buffer('quef2logfreq_matrix',torch.tensor(quef2logfreq_matrix.astype(np.float32)))

    def _CFP(self, spec):
        """Alternate spectrum/cepstrum power-law layers (the CFP cascade)."""
        # BUGFIX: initialize ceps so the return statement is well-defined even
        # when len(g) < 2 (previously raised UnboundLocalError in that case).
        ceps = torch.zeros_like(spec)
        spec = torch.relu(spec).pow(self.g[0])

        # NOTE(review): torch.rfft was removed in PyTorch 1.8; on modern torch this
        # needs torch.fft.fft(...,).real instead — confirm the targeted torch version.
        if self.NumofLayer >= 2:
            for gc in range(1, self.NumofLayer):
                if np.remainder(gc, 2) == 1:
                    ceps = torch.rfft(spec, 1, onesided=False)[:,:,:,0]/np.sqrt(self.N)
                    ceps = self.nonlinear_func(ceps, self.g[gc], self.tc_idx)
                else:
                    spec = torch.rfft(ceps, 1, onesided=False)[:,:,:,0]/np.sqrt(self.N)
                    spec = self.nonlinear_func(spec, self.g[gc], self.fc_idx)

        return spec, ceps

    def forward(self, x):
        """Return the combined frequency-periodicity representation ``Z`` for waveform batch ``x``."""
        tfr0 = torch.stft(x, self.N, hop_length=self.hop_length, win_length=self.window_size,
                          window=self.h, onesided=False, pad_mode='constant')
        tfr0 = torch.sqrt(tfr0.pow(2).sum(-1))/torch.norm(self.h)  # calcuate magnitude
        tfr0 = tfr0.transpose(1,2)[:,1:-1]  # transpose F and T axis and discard first and last frames
        # The transpose is necessary for rfft later
        # (batch, timesteps, n_fft)
        tfr, ceps = self._CFP(tfr0)

        # removing duplicate bins
        tfr0 = tfr0[:,:,:int(round(self.N/2))]
        tfr = tfr[:,:,:int(round(self.N/2))]
        ceps = ceps[:,:,:int(round(self.N/2))]

        # Crop up to the highest frequency
        tfr0 = tfr0[:,:,:self.HighFreqIdx]
        tfr = tfr[:,:,:self.HighFreqIdx]
        ceps = ceps[:,:,:self.HighQuefIdx]

        # Project onto the log-frequency axis and fuse spectral and cepstral parts
        tfrL0 = torch.matmul(self.freq2logfreq_matrix, tfr0.transpose(1,2))
        tfrLF = torch.matmul(self.freq2logfreq_matrix, tfr.transpose(1,2))
        tfrLQ = torch.matmul(self.quef2logfreq_matrix, ceps.transpose(1,2))
        Z = tfrLF * tfrLQ

        # Only need to calculate this once
        self.t = np.arange(self.hop_length,
                           np.ceil(len(x)/float(self.hop_length))*self.hop_length,
                           self.hop_length)  # it won't be used but will be returned

        return Z  # , tfrL0, tfrLF, tfrLQ

    def nonlinear_func(self, X, g, cutoff):
        """Rectify, zero out ``cutoff`` bins at both ends, and apply the power law ``g``."""
        cutoff = int(cutoff)
        if g!=0:
            X = torch.relu(X)
            X[:, :, :cutoff] = 0
            X[:, :, -cutoff:] = 0
            X = X.pow(g)
        else:  # when g=0, it converges to log
            # NOTE(review): log of zero bins produces -inf before masking — confirm acceptable.
            X = torch.log(X)
            X[:, :, :cutoff] = 0
            X[:, :, -cutoff:] = 0
        return X

    def create_logfreq_matrix(self, f, q, fr, fc, tc, NumPerOct, fs):
        """
        Build the two triangular-interpolation matrices that map the linear
        frequency axis ``f`` and quefrency axis ``q`` onto the log-frequency axis.

        Returns ``(freq_band_transformation, quef_band_transformation)``, each of
        shape ``(num_log_bins - 1, len(axis))``.
        """
        StartFreq = fc
        StopFreq = 1/tc
        Nest = int(np.ceil(np.log2(StopFreq/StartFreq))*NumPerOct)
        central_freq = []  # A list holding the frequencies in log scale

        for i in range(0, Nest):
            CenFreq = StartFreq*pow(2, float(i)/NumPerOct)
            if CenFreq < StopFreq:
                central_freq.append(CenFreq)
            else:
                break
        Nest = len(central_freq)
        # BUGFIX: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
        # (float64) is the documented, numerically identical replacement.
        freq_band_transformation = np.zeros((Nest-1, len(f)), dtype=float)

        # Calculating the freq_band_transformation
        for i in range(1, Nest-1):
            l = int(round(central_freq[i-1]/fr))
            r = int(round(central_freq[i+1]/fr)+1)
            # rounding1
            if l >= r-1:
                freq_band_transformation[i, l] = 1
            else:
                for j in range(l, r):
                    if f[j] > central_freq[i-1] and f[j] < central_freq[i]:
                        freq_band_transformation[i, j] = (f[j] - central_freq[i-1]) / (central_freq[i] - central_freq[i-1])
                    elif f[j] > central_freq[i] and f[j] < central_freq[i+1]:
                        freq_band_transformation[i, j] = (central_freq[i + 1] - f[j]) / (central_freq[i + 1] - central_freq[i])

        # Calculating the quef_band_transformation
        f = 1/q  # divide by 0, do I need to fix this?
        quef_band_transformation = np.zeros((Nest-1, len(f)), dtype=float)
        for i in range(1, Nest-1):
            for j in range(int(round(fs/central_freq[i+1])), int(round(fs/central_freq[i-1])+1)):
                if f[j] > central_freq[i-1] and f[j] < central_freq[i]:
                    quef_band_transformation[i, j] = (f[j] - central_freq[i-1])/(central_freq[i] - central_freq[i-1])
                elif f[j] > central_freq[i] and f[j] < central_freq[i+1]:
                    quef_band_transformation[i, j] = (central_freq[i + 1] - f[j]) / (central_freq[i + 1] - central_freq[i])

        return freq_band_transformation, quef_band_transformation
| 96,009 | 41.976723 | 401 | py |
ReconVAT | ReconVAT-master/model/evaluate_functions.py | import argparse
import os
import sys
from collections import defaultdict
import numpy as np
from mir_eval.multipitch import evaluate as evaluate_frames
from mir_eval.transcription import precision_recall_f1_overlap as evaluate_notes
from mir_eval.transcription_velocity import precision_recall_f1_overlap as evaluate_notes_with_velocity
from mir_eval.util import midi_to_hz
import mir_eval
from sklearn.metrics import average_precision_score
from scipy.stats import hmean
from tqdm import tqdm
from model import *
eps = sys.float_info.epsilon
def evaluate_wo_velocity(data, model, onset_threshold=0.5, frame_threshold=0.5, save_path=None, reconstruction=True, onset=True, pseudo_onset=False, rule='rule2', VAT=False):
    """Evaluate a transcription model on `data`, ignoring note velocities.

    Args:
        data: iterable of label dicts (each consumed by `model.run_on_batch`,
            and containing at least 'onset', 'frame' and 'path' entries).
        model: transcription model exposing `run_on_batch`.
        onset_threshold, frame_threshold: binarization thresholds for predictions.
        save_path: if given, piano-roll PNGs and a predicted MIDI file are saved there.
        reconstruction: also score the reconstructed heads ('onset2'/'frame2').
        onset: use the onset head for note extraction; otherwise frames only.
        pseudo_onset: use ground-truth onsets with predicted frames (oracle onsets).
        rule: note-extraction rule forwarded to `extract_notes_wo_velocity`.
        VAT: model's `run_on_batch` takes the (batch, batch_ul, VAT) signature.

    Returns:
        defaultdict(list) mapping metric names to per-item values.
    """
    metrics = defaultdict(list)
    for label in data:
        # Run inference; VAT-style models take extra positional arguments.
        if VAT==True:
            pred, losses, _ = model.run_on_batch(label, None, False)
        else:
            pred, losses, _ = model.run_on_batch(label)
        # print(f"pred['onset2'] = {pred['onset2'].shape}")
        # print(f"pred['frame2'] = {pred['frame2'].shape}")
        for key, loss in losses.items():
            metrics[key].append(loss.item())
        for key, value in pred.items():
            if key in ['frame','onset', 'frame2', 'onset2']:
                # In-place: drop the batch dim and clamp negatives to zero.
                value.squeeze_(0).relu_()
        # Extract (pitch, interval) note events for reference and estimate.
        if onset==True:
            if pseudo_onset==True:
                p_ref, i_ref = extract_notes_wo_velocity(label['onset'], label['frame'], rule=rule)
                p_est, i_est = extract_notes_wo_velocity(label['onset'], pred['frame'], onset_threshold, frame_threshold, rule=rule)
            else:
                p_ref, i_ref = extract_notes_wo_velocity(label['onset'], label['frame'], rule=rule)
                p_est, i_est = extract_notes_wo_velocity(pred['onset'], pred['frame'], onset_threshold, frame_threshold, rule=rule)
        else:
            p_ref, i_ref = extract_notes_wo_velocity(label['frame'], label['frame'], rule=rule)
            p_est, i_est = extract_notes_wo_velocity(pred['frame'], pred['frame'], onset_threshold, frame_threshold, rule=rule)
        # print(f"p_ref = {p_ref}\n p_est = {p_est}")
        # Frame-level representations derived from the note events.
        t_ref, f_ref = notes_to_frames(p_ref, i_ref, label['frame'].shape)
        t_est, f_est = notes_to_frames(p_est, i_est, pred['frame'].shape)
        scaling = HOP_LENGTH / SAMPLE_RATE  # seconds per frame step
        # Converting time steps to seconds and midi number to frequency
        i_ref = (i_ref * scaling).reshape(-1, 2)
        p_ref = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_ref])
        i_est = (i_est * scaling).reshape(-1, 2)
        p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])
        t_ref = t_ref.astype(np.float64) * scaling
        f_ref = [np.array([midi_to_hz(MIN_MIDI + midi) for midi in freqs]) for freqs in f_ref]
        t_est = t_est.astype(np.float64) * scaling
        f_est = [np.array([midi_to_hz(MIN_MIDI + midi) for midi in freqs]) for freqs in f_est]
        # Note-level scores with onset-only matching (offset_ratio=None).
        p, r, f, o = evaluate_notes(i_ref, p_ref, i_est, p_est, offset_ratio=None)
        metrics['metric/note/precision'].append(p)
        metrics['metric/note/recall'].append(r)
        metrics['metric/note/f1'].append(f)
        metrics['metric/note/overlap'].append(o)
        # Note-level scores requiring matching offsets as well.
        p, r, f, o = evaluate_notes(i_ref, p_ref, i_est, p_est)
        metrics['metric/note-with-offsets/precision'].append(p)
        metrics['metric/note-with-offsets/recall'].append(r)
        metrics['metric/note-with-offsets/f1'].append(f)
        metrics['metric/note-with-offsets/overlap'].append(o)
        # Frame-level multi-pitch scores; F1 via harmonic mean of P and R.
        frame_metrics = evaluate_frames(t_ref, f_ref, t_est, f_est)
        metrics['metric/frame/f1'].append(hmean([frame_metrics['Precision'] + eps, frame_metrics['Recall'] + eps]) - eps)
        # Threshold-free micro average precision over all frame bins.
        avp = average_precision_score(label['frame'].cpu().detach().flatten() ,pred['frame'].cpu().detach().flatten())
        metrics['metric/MusicNet/micro_avg_P'].append(avp)
        if reconstruction:
            # Repeat scoring for the second (reconstruction) prediction heads.
            p_est2, i_est2 = extract_notes_wo_velocity(pred['onset2'], pred['frame2'], onset_threshold, frame_threshold)
            t_est2, f_est2 = notes_to_frames(p_est2, i_est2, pred['frame2'].shape)
            i_est2 = (i_est2 * scaling).reshape(-1, 2)
            p_est2 = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est2])
            t_est2 = t_est2.astype(np.float64) * scaling
            f_est2 = [np.array([midi_to_hz(MIN_MIDI + midi) for midi in freqs]) for freqs in f_est2]
            p2, r2, f2, o2 = evaluate_notes(i_ref, p_ref, i_est2, p_est2, offset_ratio=None)
            metrics['metric/note/precision_2'].append(p2)
            metrics['metric/note/recall_2'].append(r2)
            metrics['metric/note/f1_2'].append(f2)
            metrics['metric/note/overlap_2'].append(o2)
            frame_metrics2 = evaluate_frames(t_ref, f_ref, t_est2, f_est2)
            frame_metrics['Precision_2'] = frame_metrics2['Precision']
            frame_metrics['Recall_2'] = frame_metrics2['Recall']
            frame_metrics['accuracy_2'] = frame_metrics2['Accuracy']
            metrics['metric/frame/f1_2'].append(hmean([frame_metrics['Precision_2'] + eps, frame_metrics['Recall_2'] + eps]) - eps)
            avp = average_precision_score(label['frame'].cpu().detach().flatten() ,pred['frame2'].cpu().detach().flatten())
            metrics['metric/MusicNet/micro_avg_P2'].append(avp)
            p2, r2, f2, o2 = evaluate_notes(i_ref, p_ref, i_est2, p_est2)
            metrics['metric/note-with-offsets/precision_2'].append(p2)
            metrics['metric/note-with-offsets/recall_2'].append(r2)
            metrics['metric/note-with-offsets/f1_2'].append(f2)
            metrics['metric/note-with-offsets/overlap_2'].append(o2)
        for key, loss in frame_metrics.items():
            metrics['metric/frame/' + key.lower().replace(' ', '_')].append(loss)
        if save_path is not None:
            # Persist piano rolls (label + prediction) and a predicted MIDI file.
            os.makedirs(save_path, exist_ok=True)
            label_path = os.path.join(save_path, os.path.basename(label['path']) + '.label.png')
            save_pianoroll(label_path, label['onset'], label['frame'])
            pred_path = os.path.join(save_path, os.path.basename(label['path']) + '.pred.png')
            save_pianoroll(pred_path, pred['onset'], pred['frame'])
            midi_path = os.path.join(save_path, os.path.basename(label['path']) + '.pred.mid')
            # Velocity is not predicted; write max velocity (127) for every note.
            save_midi(midi_path, p_est, i_est, [127]*len(p_est))
    return metrics
| 6,587 | 50.069767 | 174 | py |
ReconVAT | ReconVAT-master/model/Segmentation.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.nn.init as init
import numpy as np
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
class Seg_VAT(nn.Module):
    """Virtual Adversarial Training (VAT) regularizer for the segmentation model.

    Estimates, via power iteration, a small perturbation `r_adv` of norm
    `epsilon` that maximally changes the model output, then returns the
    resulting local distributional smoothness (LDS) loss.
    """
    def __init__(self, XI, epsilon, n_power, KL_Div, reconstruction=False):
        super().__init__()
        self.n_power = n_power      # number of power-iteration steps
        self.XI = XI                # scale of the finite-difference probe
        self.epsilon = epsilon      # norm of the final adversarial perturbation
        self.KL_Div = KL_Div        # use binary KL divergence instead of BCE
        self.binwise = False        # normalize along the last dim, not per-bin
        self.reconstruction = reconstruction

    def forward(self, model, x):
        """Return (vat_loss, r_adv, normalized_direction) for input batch `x`."""
        # Reference prediction acts as a fixed target; no gradient needed.
        with torch.no_grad():
            y_ref = model(x)

        # Power iteration: estimate the adversarial direction d.
        # NOTE(review): after the first step `d` is a detached tensor, so only
        # n_power == 1 works as written — confirm before raising n_power.
        d = torch.randn_like(x, requires_grad=True)  # need grad w.r.t. d
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            x_adv = (x + r).clamp(0,1)
            y_pred = model(x_adv)
            if self.KL_Div==True:
                loss = binary_kl_div(y_pred, y_ref)
            else:
                loss = F.binary_cross_entropy(y_pred, y_ref)
            loss.backward()               # gradient w.r.t. d
            d = d.grad.detach()*1e10      # rescale to avoid underflow before normalizing
            model.zero_grad()             # prevent gradient accumulation in the model

        # Final adversarial perturbation and the VAT (LDS) loss.
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any()==False, f"r_adv has nan, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        # Bug fix: this assert previously re-checked isnan while claiming "inf".
        assert torch.isinf(r_adv).any()==False, f"r_adv has inf, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        x_adv = (x + r_adv).clamp(0,1)
        y_pred = model(x_adv)
        if self.KL_Div==True:
            vat_loss = binary_kl_div(y_pred, y_ref)
        else:
            vat_loss = F.binary_cross_entropy(y_pred, y_ref)
        return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise)  # already averaged
def calculate_padding(input_size, kernel_size, stride):
    """Compute TensorFlow 'SAME'-style padding for a 2D (time, freq) input.

    Returns (pad_left, pad_right, pad_top, pad_bottom) in the order expected
    by `torch.nn.functional.pad`. Scalars for kernel_size/stride apply to both
    axes.
    """
    def _pair(value):
        return value if type(value) == tuple else (value, value)

    def _axis_total(size, k, s):
        # Total padding so that the output size is ceil(size / stride).
        if size % s == 0:
            return max(k - s, 0)
        return max(k - (size % s), 0)

    k_t, k_f = _pair(kernel_size)
    s_t, s_f = _pair(stride)

    total_t = _axis_total(input_size[0], k_t, s_t)
    total_f = _axis_total(input_size[1], k_f, s_f)

    pad_top = total_t // 2
    pad_left = total_f // 2
    return (pad_left, total_f - pad_left, pad_top, total_t - pad_top)
def transpose_padding_same(x, input_shape, stride):
    """Crop ConvTranspose2d output to emulate TensorFlow padding='SAME'.

    Args:
        x: output of the transposed convolution, shape (N, C, H, W).
        input_shape: shape of the tensor BEFORE the transposed conv; its
            spatial dims times `stride` give the target output size.
        stride: (stride_h, stride_w) of the transposed convolution.

    Returns:
        `x` with the extra transposed-conv padding cropped symmetrically
        (extra odd pixel goes to the bottom/right).

    Bug fixes vs. original: when the shapes already matched, the function fell
    through to a `return` that referenced undefined `left`/`right` (NameError);
    and a zero crop on one axis produced an empty `-0` slice. Cropping by
    start + length handles both cases.
    """
    target_shape = torch.tensor(input_shape[2:]) * torch.tensor(stride)
    output_shape = torch.tensor(x.shape[2:])
    if torch.equal(target_shape, output_shape):
        # Already the right size; nothing to crop.
        return x
    excess = output_shape - target_shape
    left = excess // 2
    # Slice by start + target length so a zero-excess axis is left untouched.
    return x[:, :, left[0]:left[0] + target_shape[0], left[1]:left[1] + target_shape[1]]
def SAME_padding(x, ksize, stride):
    """Apply TF 'SAME'-style zero padding to a (N, C, H, W) tensor."""
    pad_lrtb = calculate_padding(x.shape[2:], ksize, stride)
    return F.pad(x, pad_lrtb)
class Conv_Block(nn.Module):
    """Residual conv block: two `ksize` convs (first strided) plus a projected skip.

    Pre-activation style (BN + ReLU + Dropout before each conv). TF 'SAME'
    padding is applied manually via `SAME_padding`, so all convs are built
    with padding=0.
    """
    def __init__(self, inp, out, ksize, stride=(2,2), dilation_rate=1, dropout_rate=0.4):
        super().__init__()
        self.ksize = ksize
        self.stride = stride
        self.stride_conv2 = 1   # second conv never downsamples
        self.ksize_skip = 1     # 1x1 conv matches channels/stride on the skip path
        padding=0 # We don't pad with the Conv2d class, we use F.pad to pad instead
        self.conv1 = nn.Conv2d(inp,out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation_rate)
        self.bn1 = nn.BatchNorm2d(inp)
        self.dropout1 = nn.Dropout(dropout_rate)
        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, stride=self.stride_conv2, padding=padding, dilation=dilation_rate)
        self.bn2 = nn.BatchNorm2d(out)
        self.dropout2 = nn.Dropout(dropout_rate)
        self.conv_skip = nn.Conv2d(inp, out, kernel_size=self.ksize_skip, stride=stride, padding=padding)

    def forward(self, x):
        skip = x # save a copy for the skip connection later
        x = self.bn1(torch.relu(x))
        x = self.dropout1(x)
        # Calculating padding corresponding to 'SAME' in tf
        x = SAME_padding(x, self.ksize, self.stride)
        x = self.conv1(x)
        x = self.bn2(torch.relu(x))
        x = self.dropout2(x)
        # Calculating padding corresponding to 'SAME' in tf
        x = SAME_padding(x, self.ksize, self.stride_conv2)
        x = self.conv2(x)
        if self.stride!=(1,1):
            # Project the skip path to the downsampled resolution before adding.
            # NOTE(review): when stride==(1,1) the raw input is added directly,
            # which assumes inp == out — confirm against callers.
            skip = SAME_padding(skip, self.ksize_skip, self.stride)
            # Padding is mostly 0 so far, comment it out first
            skip = self.conv_skip(skip)
        x = x + skip # skip connection
        return x
class transpose_conv_block(nn.Module):
    """Residual upsampling block: a 1x1-stride conv followed by a strided
    ConvTranspose2d, with a transposed 1x1 skip projection.

    Output is cropped with `transpose_padding_same` to emulate TF
    padding='SAME', then trimmed to the encoder `shape` passed to `forward`.
    """
    def __init__(self, inp, out, ksize, stride=(2,2), dropout_rate=0.4):
        super().__init__()
        self.stride = stride
        self.ksize = ksize
        padding=0 # We don't pad with the Conv2d class, we use F.pad to pad instead
        self.conv1 = nn.Conv2d(inp,out, kernel_size=ksize, stride=(1,1), padding=padding)
        self.bn1 = nn.BatchNorm2d(inp)
        self.dropout1 = nn.Dropout(dropout_rate)
        self.conv2 = nn.ConvTranspose2d(out, out, kernel_size=ksize, stride=stride, padding=padding)
        self.bn2 = nn.BatchNorm2d(out)
        self.dropout2 = nn.Dropout(dropout_rate)
        self.conv_skip = nn.ConvTranspose2d(inp, out, kernel_size=1, stride=stride, padding=padding)

    def forward(self, x, shape):
        # `shape` is the target (encoder-side) shape the upsampled output
        # should be trimmed to match.
        skip = x # save a copy for the skip connection later
        input_shape_skip = skip.shape # will be used as in the transpose padding later
        x = self.bn1(torch.relu(x))
        x = self.dropout1(x)
        x = SAME_padding(x, self.ksize, (1,1))
        x = self.conv1(x)
        # transpose_conv1 = torch.Size([1, 128, 40, 15])
        x = self.bn2(torch.relu(x))
        x = self.dropout2(x)
        input_shape = x.shape
        x = self.conv2(x)
        x = transpose_padding_same(x, input_shape, self.stride)
        # Removing extra pixels induced due to ConvTranspose
        if x.shape[2]>shape[2]:
            x = x[:,:,:-1,:]
        if x.shape[3]>shape[3]:
            x = x[:,:,:,:-1]
        # transpose_conv2 = torch.Size([1, 128, 83, 35])
        if self.stride!=(1,1):
            # Check keras about the transConv output shape
            skip = self.conv_skip(skip, output_size=x.shape) # make output size same as x
            # skip = transpose_padding_same(skip, input_shape_skip, self.stride)
        x = x + skip # skip connection
        return x
class Decoder_Block(nn.Module):
    """U-Net style decoder stage: concatenate the matching encoder feature map,
    fuse with a 1x1 conv (residual), then upsample 2x via `transpose_conv_block`.
    """
    def __init__(self,
                 input_channels,
                 encoder_channels,
                 hidden_channels,
                 output_channels,
                 dropout_rate=0.4):
        super().__init__()
        # Again, not using Conv2d to calculate the padding,
        # use F.pad to obtain a more general padding under forward
        self.ksize = (1,1)
        self.stride = (1,1)
        self.layer1a = nn.Conv2d(input_channels+encoder_channels, hidden_channels, kernel_size=self.ksize, stride=self.stride) # the channel dim for feature
        self.bn = nn.BatchNorm2d(input_channels)
        self.bn_en = nn.BatchNorm2d(encoder_channels)
        self.dropout1 = nn.Dropout(dropout_rate)
        self.layer1b = transpose_conv_block(input_channels, output_channels, (3,3), (2,2))

    def forward(self, x, encoder_output, encoder_shape):
        # NOTE(review): the residual add `x + skip` assumes
        # hidden_channels == input_channels — confirm against construction sites.
        skip = x # save a copy for the skip connection later
        x = self.bn(torch.relu(x))
        en_l = self.bn_en(torch.relu(encoder_output))
        x = torch.cat((x, en_l), 1)  # channel-wise concat with the encoder skip
        x = self.dropout1(x)
        x = SAME_padding(x, self.ksize, self.stride)
        x = self.layer1a(x)
        x = x + skip
        x = self.layer1b(x, encoder_shape)  # upsample to the encoder resolution
        return x
class MutliHeadAttention2D(nn.Module):
    """Multi-head local 2D self-attention over (time, freq) feature maps.

    Each output position attends over a `kernel_size` window of its padded
    neighborhood; `groups` is the number of attention heads. Relative position
    embeddings (`rel_t`, `rel_f`) are added to the keys, split across the two
    halves of the channel dimension.
    (Note: class name keeps the original "Mutli" spelling — it is the public API.)
    """
    def __init__(self, in_channels, out_channels, kernel_size=(3,3), stride=(1,1), groups=1, bias=False):
        """kernel_size is the 2D local attention window size"""
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding_time = (kernel_size[0]-1)//2
        self.padding_freq = (kernel_size[1]-1)//2
        self.groups = groups
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_channels % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        # Relative position encoding
        self.rel_t = nn.Parameter(torch.randn(out_channels // 2, 1, 1, kernel_size[0], 1), requires_grad=True)
        self.rel_f = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1, kernel_size[1]), requires_grad=True)
        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.reset_parameters()

    def forward(self, x):
        """Return (attended features (N,C,H,W), attention weights)."""
        batch, channels, height, width = x.size()
        # Pad so every position has a full local window for keys/values.
        padded_x = F.pad(x, [self.padding_freq, self.padding_freq, self.padding_time, self.padding_time])
        q_out = self.query_conv(x)
        k_out = self.key_conv(padded_x)
        v_out = self.value_conv(padded_x)
        # Unfold local windows around each (H, W) position.
        k_out = k_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)
        v_out = v_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)
        # Add relative position embeddings to each half of the key channels.
        k_out_t, k_out_f = k_out.split(self.out_channels // 2, dim=1)
        k_out = torch.cat((k_out_t + self.rel_t, k_out_f + self.rel_f), dim=1) # relative position?
        k_out = k_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        # (batch, n_heads, feature_per_head, H, W, local H X W)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, self.groups, self.out_channels // self.groups, height, width, 1)
        # (batch, n_heads, feature_per_head, H, W, 1)
        # Alternative way to express dot product
        # same as k_out = k_out.permute(0,1,3,4,2,5)
        # and then energy = torch.matmul(q_out,k_out)
        energy = (q_out * k_out).sum(dim=2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, n_heads, 1, H, W, local HXW)
        out = attention*v_out
        # (batch, n_heads, feature_per_head, H, W, local HXW)
        # (batch, c, H, W)
        return out.sum(-1).flatten(1,2), attention.squeeze(2)

    def reset_parameters(self):
        # Kaiming init for the 1x1 projections; standard normal for the
        # relative position embeddings.
        init.kaiming_normal_(self.key_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.value_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.query_conv.weight, mode='fan_out', nonlinearity='relu')
        init.normal_(self.rel_t, 0, 1)
        init.normal_(self.rel_f, 0, 1)
class Encoder(nn.Module):
    """Convolutional encoder: a 7x7 stem then four `Conv_Block` stages.

    Each stage's first block downsamples 2x; channel width doubles per stage
    (32 -> 64 -> 128 -> 256). `forward` also returns the intermediate feature
    maps and shapes needed by the decoder's skip connections.
    """
    def __init__(self,
                 input_size,
                 feature_num=128,
                 timesteps=256,
                 multi_grid_layer_n=1,
                 multi_grid_n=3,
                 ch_num=1,
                 prog=False,
                 dropout_rate=0.4,
                 out_class=2):
        # NOTE(review): feature_num/timesteps/multi_grid_*/ch_num/prog/out_class
        # are accepted but unused in this class body.
        super().__init__()
        # Parameters for the encoding layer
        en_kernel_size = (7,7)
        en_stride = (1,1)
        # Again, not using Conv2d to calculate the padding,
        # use F.pad to obtain a more general padding under forward
        self.en_padding = calculate_padding(input_size, en_kernel_size, en_stride)
        # Instead of using Z, it should be using Z_f and Z_q
        # But for the sake of this experiment,
        self.encoding_layer = nn.Conv2d(1, 2**5, kernel_size=en_kernel_size, stride=en_stride, padding=0)
        # Stage 1: 32 channels, one downsampling block + one refinement block.
        self.layer1a = Conv_Block(2**5, 2**5, ksize=(3,3), stride=(2,2), dropout_rate=dropout_rate)
        self.layer1b = Conv_Block(2**5, 2**5, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        # Stage 2: 64 channels.
        self.layer2a = Conv_Block(2**5, 2**6, ksize=(3,3), stride=(2,2), dropout_rate=dropout_rate)
        self.layer2b = Conv_Block(2**6, 2**6, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        self.layer2c = Conv_Block(2**6, 2**6, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        # Stage 3: 128 channels.
        self.layer3a = Conv_Block(2**6, 2**7, ksize=(3,3), stride=(2,2), dropout_rate=dropout_rate)
        self.layer3b = Conv_Block(2**7, 2**7, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        self.layer3c = Conv_Block(2**7, 2**7, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        self.layer3d = Conv_Block(2**7, 2**7, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        # Stage 4: 256 channels.
        self.layer4a = Conv_Block(2**7, 2**8, ksize=(3,3), stride=(2,2), dropout_rate=dropout_rate)
        self.layer4b = Conv_Block(2**8, 2**8, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        self.layer4c = Conv_Block(2**8, 2**8, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        self.layer4d = Conv_Block(2**8, 2**8, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)
        self.layer4e = Conv_Block(2**8, 2**8, ksize=(3,3), stride=(1,1), dropout_rate=dropout_rate)

    def forward(self, x):
        """Return (features, (en_l1, en_l2, en_l3), (orig, s1, s2, s3) shapes)."""
        skip = x # save a copy for the skip connection later
        original_shape = x.shape
        x = F.pad(x, self.en_padding)
        x = self.encoding_layer(x)
        x = self.layer1a(x)
        x = self.layer1b(x)
        en_l1 = x           # stage-1 features, kept for the decoder skip
        shape1 = x.shape
        x = self.layer2a(x)
        x = self.layer2b(x)
        x = self.layer2c(x)
        shape2 = x.shape
        en_l2 = x           # stage-2 features
        x = self.layer3a(x)
        x = self.layer3b(x)
        x = self.layer3c(x)
        x = self.layer3d(x)
        shape3 = x.shape
        en_l3 = x           # stage-3 features
        x = self.layer4a(x)
        x = self.layer4b(x)
        x = self.layer4c(x)
        x = self.layer4d(x)
        x = self.layer4e(x)
        shape4 = x.shape
        en_l4 = x
        # en_l4 and shape4 could not be used inside the decoder, that's why they are omitted
        return x, (en_l1, en_l2, en_l3), (original_shape, shape1, shape2, shape3)
class Decoder(nn.Module):
    """Three-stage decoder mirroring `Encoder`; each `Decoder_Block` consumes
    the matching encoder feature map (deepest first) and upsamples 2x.
    """
    def __init__(self,
                 dropout_rate=0.4):
        super().__init__()
        self.de_layer1 = Decoder_Block(2**7, 2**7, 2**7, 2**6, dropout_rate)
        self.de_layer2 = Decoder_Block(2**6, 2**6, 2**6, 2**6, dropout_rate)
        self.de_layer3 = Decoder_Block(2**6, 2**5, 2**6, 2**6, dropout_rate)

    def forward(self, x, encoder_outputs, encoder_shapes):
        # Walk the encoder skips from deepest (index -1) to shallowest.
        x = self.de_layer1(x, encoder_outputs[-1], encoder_shapes[-2])
        x = self.de_layer2(x, encoder_outputs[-2], encoder_shapes[-3])
        x = self.de_layer3(x, encoder_outputs[-3], encoder_shapes[-4]) # Check this
        return x
class Semantic_Segmentation(nn.Module):
    def __init__(self, x, out_class=2, dropout_rate=0.4, log=True,
                 mode='imagewise', spec='Mel', device='cpu', XI=1e-6, eps=1e-2):
        """Build the segmentation transcriber.

        Args:
            x: a sample input tensor; its shape sizes the encoder padding and
               the final Linear inference layer.
            out_class: number of output channels before the Linear head.
            log: apply log compression to the spectrogram.
            mode: normalization mode for `Normalization`.
            spec: 'CQT', 'Mel' or 'CFP' front-end (from nnAudio).
            XI, eps: VAT probe scale and perturbation norm (see `Seg_VAT`).
        """
        super().__init__()
        global N_BINS # using the N_BINS parameter from constant.py
        # Selecting the type of spectrogram to use
        if spec == 'CQT':
            r=2
            N_BINS = 88*r
            self.spectrogram = Spectrogram.CQT1992v2(sr=SAMPLE_RATE, hop_length=HOP_LENGTH,
                                                     n_bins=N_BINS, fmin=27.5,
                                                     bins_per_octave=12*r, trainable=False)
        elif spec == 'Mel':
            self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                          hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                          trainable_mel=False, trainable_STFT=False)
        elif spec == 'CFP':
            self.spectrogram = Spectrogram.CFP(fs=SAMPLE_RATE,
                                               fr=4,
                                               window_size=WINDOW_LENGTH,
                                               hop_length=HOP_LENGTH,
                                               fc=MEL_FMIN,
                                               tc=1/MEL_FMAX)
            N_BINS = self.spectrogram.quef2logfreq_matrix.shape[0]
        else:
            print(f'Please select a correct spectrogram')
        self.log = log
        self.normalize = Normalization(mode)
        self.vat_loss = Seg_VAT(XI, eps, 1, False)
        self.encoder = Encoder((x.shape[2:]), dropout_rate=dropout_rate)
        # Two local self-attention layers over the deepest encoder features.
        self.attention_layer1 = MutliHeadAttention2D(256, 64, kernel_size=(17,17), stride=(1,1), groups=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.attention_layer2 = MutliHeadAttention2D(64, 128, kernel_size=(17,17), stride=(1,1), groups=1, bias=False)
        self.bn2 = nn.BatchNorm2d(128)
        # L218-221 of the original code
        # Few layers before the Decoder part
        self.layer0a = nn.Conv2d(384, 2**8, (1,1), (1,1))
        self.layer0b = transpose_conv_block(2**8, 2**7, (3,3), (2,2))
        self.decoder = Decoder(dropout_rate=dropout_rate)
        # Last few layers that determines the output
        self.bn_last = nn.BatchNorm2d(2**6)
        self.dropout_last = nn.Dropout(dropout_rate)
        self.conv_last = nn.Conv2d(2**6, out_class, (1,1), (1,1))
        # Linear head maps the (padded) frequency axis to 88 piano keys.
        self.inference_model = nn.Linear(x.shape[-1], 88)
    def forward(self, x):
        """Encoder -> local attention -> decoder -> 88-key sigmoid piano roll."""
        x, encoder_outputs, encoder_shapes = self.encoder(x)
        en_l4 = x # Will be appened with the attention output and decoder later
        # Two layers of self-attention
        x,_ = self.attention_layer1(en_l4)
        x = self.bn1(torch.relu(x))
        x, _ = self.attention_layer2(x)
        x = self.bn2(torch.relu(x))
        x = torch.cat((en_l4, x),1) # L216
        # L218-221 of the original code
        # Few layers before the Decoder part
        x = SAME_padding(x, (1,1), (1,1))
        x = self.layer0a(x)
        x = x + en_l4   # residual back onto the encoder features
        x = self.layer0b(x, encoder_shapes[-1]) # Transposing back to the Encoder shape
        # Decoder part
        x = self.decoder(x, encoder_outputs, encoder_shapes)
        # Last few layers for the output block
        x = self.bn_last(torch.relu(x))
        x = self.dropout_last(x)
        x = self.conv_last(x)
        # We use a Linear layer as the inference model here
        x = x.squeeze(1) # remove the channel dim
        x = self.inference_model(x)
        x = torch.sigmoid(x)  # per-key activation probabilities in [0, 1]
        return x
    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Run one labelled batch (plus optional unlabelled batch for VAT).

        Args:
            batch: dict with 'audio', 'onset', 'frame' tensors.
            batch_ul: optional unlabelled batch (only 'audio' is used) for the
                unsupervised VAT loss term.
            VAT: also compute the VAT loss on the labelled batch.

        Returns:
            (predictions, losses, spectrogram) — predictions reuse the single
            frame output for both 'onset' and 'frame' keys.
        """
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)  # ensure a batch dimension
        if batch_ul:
            # Unlabelled branch: spectrogram + VAT loss only (no supervision).
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2).unsqueeze(1)
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1) # remove the channel dimension
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)
        frame_pred = self(spec)
        if self.training:
            predictions = {
                'onset': frame_pred,
                'frame': frame_pred,
                'r_adv': r_adv,
            }
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
                'loss/train_r_norm_l': r_norm_l.abs().mean(),
                'loss/train_r_norm_ul': r_norm_ul.abs().mean()
            }
        else:
            # Evaluation: reshape predictions to match the label layout.
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'r_adv': r_adv,
            }
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/test_LDS_l': lds_l,
                'loss/test_r_norm_l': r_norm_l.abs().mean()
            }
        return predictions, losses, spec.squeeze(1)
    def transcribe(self, batch):
        """Inference-only path: audio -> spectrogram -> piano-roll predictions.

        Returns a dict where both 'onset' and 'frame' point to the same
        frame-level output (this model has a single prediction head).
        """
        audio_label = batch['audio']
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        pianoroll = self(spec)
        predictions = {
            'onset': pianoroll,
            'frame': pianoroll,
        }
        return predictions
def load_my_state_dict(self, state_dict):
"""Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
own_state = self.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param) | 25,776 | 39.15109 | 156 | py |
ReconVAT | ReconVAT-master/model/midi.py | import multiprocessing
import sys
import mido
import numpy as np
from joblib import Parallel, delayed
from mido import Message, MidiFile, MidiTrack
from mir_eval.util import hz_to_midi
from tqdm import tqdm
def parse_midi(path):
    """open midi file and return np.array of (onset, offset, note, velocity) rows"""
    midi = mido.MidiFile(path)

    time = 0          # running absolute time in seconds (mido yields deltas)
    sustain = False   # current sustain-pedal state (CC64 >= 64 means down)
    events = []
    for message in midi:
        time += message.time

        if message.type == 'control_change' and message.control == 64 and (message.value >= 64) != sustain:
            # sustain pedal state has just changed
            sustain = message.value >= 64
            event_type = 'sustain_on' if sustain else 'sustain_off'
            event = dict(index=len(events), time=time, type=event_type, note=None, velocity=0)
            events.append(event)

        if 'note' in message.type:
            # MIDI offsets can be either 'note_off' events or 'note_on' with zero velocity
            velocity = message.velocity if message.type == 'note_on' else 0
            event = dict(index=len(events), time=time, type='note', note=message.note, velocity=velocity, sustain=sustain)
            events.append(event)

    notes = []
    for i, onset in enumerate(events):
        # Only nonzero-velocity note events start a note.
        if onset['velocity'] == 0:
            continue

        # find the next note_off message
        offset = next(n for n in events[i + 1:] if n['note'] == onset['note'] or n is events[-1])

        if offset['sustain'] and offset is not events[-1]:
            # if the sustain pedal is active at offset, find when the sustain ends
            offset = next(n for n in events[offset['index'] + 1:] if n['type'] == 'sustain_off' or n is events[-1])

        note = (onset['time'], offset['time'], onset['note'], onset['velocity'])
        notes.append(note)

    return np.array(notes)
def save_midi(path, pitches, intervals, velocities):
    """
    Save extracted notes as a MIDI file
    Parameters
    ----------
    path: the path to save the MIDI file
    pitches: np.ndarray of frequencies in Hz (converted back to MIDI numbers here)
    velocities: list of velocity values in [0, 1] (scaled to 0-127 here)
    intervals: list of (onset_time, offset_time) in seconds
    """
    file = MidiFile()
    track = MidiTrack()
    file.tracks.append(track)
    # 120 BPM assumed: ticks_per_beat * 2 beats/second gives ticks per second.
    ticks_per_second = file.ticks_per_beat * 2.0

    events = []
    for i in range(len(pitches)):
        events.append(dict(type='on', pitch=pitches[i], time=intervals[i][0], velocity=velocities[i]))
        events.append(dict(type='off', pitch=pitches[i], time=intervals[i][1], velocity=velocities[i]))
    events.sort(key=lambda row: row['time'])

    last_tick = 0
    for event in events:
        current_tick = int(event['time'] * ticks_per_second)
        velocity = int(event['velocity'] * 127)
        if velocity > 127:
            velocity = 127
        pitch = int(round(hz_to_midi(event['pitch'])))
        # mido messages carry delta times, hence current - last tick.
        track.append(Message('note_' + event['type'], note=pitch, velocity=velocity, time=current_tick - last_tick))
        last_tick = current_tick

    file.save(path)
if __name__ == '__main__':
    # CLI: convert the MIDI files passed as arguments to .tsv note lists,
    # one output file per input, processed in parallel.

    def process(input_file, output_file):
        # Parse one MIDI file and dump its (onset, offset, note, velocity) rows.
        midi_data = parse_midi(input_file)
        np.savetxt(output_file, midi_data, '%.6f', '\t', header='onset\toffset\tnote\tvelocity')

    def files():
        # Yield (input, output) pairs mapping *.mid / *.midi -> *.tsv; skip others.
        for input_file in tqdm(sys.argv[1:]):
            if input_file.endswith('.mid'):
                output_file = input_file[:-4] + '.tsv'
            elif input_file.endswith('.midi'):
                output_file = input_file[:-5] + '.tsv'
            else:
                print('ignoring non-MIDI file %s' % input_file, file=sys.stderr)
                continue
            yield (input_file, output_file)

    Parallel(n_jobs=multiprocessing.cpu_count())(delayed(process)(in_file, out_file) for in_file, out_file in files())
| 3,796 | 34.485981 | 122 | py |
DFMGAN | DFMGAN-main/legacy.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
#----------------------------------------------------------------------------
def load_network_pkl(f, force_fp16=False):
    """Load a StyleGAN network pickle, converting legacy TF pickles if needed.

    Args:
        f: open binary file-like object containing the pickle.
        force_fp16: rebuild G/D/G_ema with 4 FP16 resolutions and conv clamping.

    Returns:
        dict with 'G', 'D', 'G_ema' modules plus 'training_set_kwargs' and
        'augment_pipe' (filled with None when absent).
    """
    data = _LegacyUnpickler(f).load()

    # Legacy TensorFlow pickle => convert.
    if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
        tf_G, tf_D, tf_Gs = data
        G = convert_tf_generator(tf_G)
        D = convert_tf_discriminator(tf_D)
        G_ema = convert_tf_generator(tf_Gs)
        data = dict(G=G, D=D, G_ema=G_ema)

    # Add missing fields.
    if 'training_set_kwargs' not in data:
        data['training_set_kwargs'] = None
    if 'augment_pipe' not in data:
        data['augment_pipe'] = None

    # Validate contents.
    assert isinstance(data['G'], torch.nn.Module)
    assert isinstance(data['D'], torch.nn.Module)
    assert isinstance(data['G_ema'], torch.nn.Module)
    assert isinstance(data['training_set_kwargs'], (dict, type(None)))
    assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))

    # Force FP16.
    if force_fp16:
        for key in ['G', 'D', 'G_ema']:
            old = data[key]
            kwargs = copy.deepcopy(old.init_kwargs)
            if key.startswith('G'):
                kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {}))
                kwargs.synthesis_kwargs.num_fp16_res = 4
                kwargs.synthesis_kwargs.conv_clamp = 256
            if key.startswith('D'):
                kwargs.num_fp16_res = 4
                kwargs.conv_clamp = 256
            # Rebuild only when the kwargs actually changed, then copy weights.
            if kwargs != old.init_kwargs:
                new = type(old)(**kwargs).eval().requires_grad_(False)
                misc.copy_params_and_buffers(old, new, require_all=True)
                data[key] = new
    return data
#----------------------------------------------------------------------------
class _TFNetworkStub(dnnlib.EasyDict):
    """Placeholder class substituted for legacy `dnnlib.tflib.network.Network`
    objects during unpickling (see `_LegacyUnpickler`)."""
    pass
class _LegacyUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'dnnlib.tflib.network' and name == 'Network':
return _TFNetworkStub
return super().find_class(module, name)
#----------------------------------------------------------------------------
def _collect_tf_params(tf_net):
# pylint: disable=protected-access
tf_params = dict()
def recurse(prefix, tf_net):
for name, value in tf_net.variables:
tf_params[prefix + name] = value
for name, comp in tf_net.components.items():
recurse(prefix + name + '/', comp)
recurse('', tf_net)
return tf_params
#----------------------------------------------------------------------------
def _populate_module_params(module, *patterns):
    """Copy values into `module`'s params/buffers by regex-matched name.

    `patterns` alternates (regex, value_fn): for each named param/buffer, the
    first regex that fully matches selects its value_fn, which is called with
    the regex groups. A value_fn of None means "match but leave unchanged".
    Asserts every tensor matched some pattern; prints the offending name and
    shape before re-raising on failure.
    """
    for name, tensor in misc.named_params_and_buffers(module):
        found = False
        value = None
        for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
            match = re.fullmatch(pattern, name)
            if match:
                found = True
                if value_fn is not None:
                    value = value_fn(*match.groups())
                break
        try:
            assert found
            if value is not None:
                tensor.copy_(torch.from_numpy(np.array(value)))
        except:
            # Surface which tensor failed (unmatched name or shape mismatch).
            print(name, list(tensor.shape))
            raise
#----------------------------------------------------------------------------
def convert_tf_generator(tf_G):
    """Convert a legacy TensorFlow StyleGAN2(-ADA) generator pickle into the
    native PyTorch `training.networks.Generator`.

    Raises ValueError for unsupported pickle versions or unrecognized
    TensorFlow kwargs. Returns the populated Generator in eval mode with
    gradients disabled.
    """
    if tf_G.version < 4:
        raise ValueError('TensorFlow pickle version too low')
    # Collect kwargs.
    tf_kwargs = tf_G.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None, none=None):
        # Record the TF kwarg as seen; map an explicit None to a fallback.
        known_kwargs.add(tf_name)
        val = tf_kwargs.get(tf_name, default)
        return val if val is not None else none
    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        z_dim = kwarg('latent_size', 512),
        c_dim = kwarg('label_size', 0),
        w_dim = kwarg('dlatent_size', 512),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 8),
            embed_features = kwarg('label_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('mapping_nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.01),
            w_avg_beta = kwarg('w_avg_beta', 0.995, none=1),
        ),
        synthesis_kwargs = dnnlib.EasyDict(
            channel_base = kwarg('fmap_base', 16384) * 2,
            channel_max = kwarg('fmap_max', 512),
            num_fp16_res = kwarg('num_fp16_res', 0),
            conv_clamp = kwarg('conv_clamp', None),
            architecture = kwarg('architecture', 'skip'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            use_noise = kwarg('use_noise', True),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )
    # Check for unknown kwargs. These four are recognized but intentionally ignored.
    kwarg('truncation_psi')
    kwarg('truncation_cutoff')
    kwarg('style_mixing_prob')
    kwarg('structure')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
    # Collect params. Progressive-growing pickles store per-lod ToRGB layers;
    # rename them to the per-resolution scheme and force the 'orig' architecture.
    tf_params = _collect_tf_params(tf_G)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
        if match:
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            # Bug fix: the flag lives in kwargs.synthesis_kwargs — the original
            # 'kwargs.synthesis.kwargs.architecture' raised AttributeError
            # whenever one of these legacy pickles was converted.
            kwargs.synthesis_kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
    # Convert params. Conv weights go HWIO -> OIHW; up/down-sampling weights are
    # additionally flipped; modulation biases are shifted by +1 for the PyTorch
    # affine parameterization.
    from training import networks
    G = networks.Generator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    _populate_module_params(G,
        r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'],
        r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'],
        r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0],
        r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'],
        r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0],
        r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'],
        r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
        r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
        r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
        r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
        r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
        r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
        r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
        r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
        r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'.*\.resample_filter', None,
    )
    return G
#----------------------------------------------------------------------------
def convert_tf_discriminator(tf_D):
    """Convert a legacy TensorFlow StyleGAN2(-ADA) discriminator pickle into
    the native PyTorch `training.networks.Discriminator`.

    Raises ValueError for unsupported pickle versions or unrecognized
    TensorFlow kwargs. Returns the populated Discriminator in eval mode with
    gradients disabled.
    """
    if tf_D.version < 4:
        raise ValueError('TensorFlow pickle version too low')
    # Collect kwargs.
    tf_kwargs = tf_D.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None):
        # Record the TF kwarg as seen so the unknown-kwarg check below works.
        known_kwargs.add(tf_name)
        return tf_kwargs.get(tf_name, default)
    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        c_dim = kwarg('label_size', 0),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        architecture = kwarg('architecture', 'resnet'),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        cmap_dim = kwarg('mapping_fmaps', None),
        block_kwargs = dnnlib.EasyDict(
            activation = kwarg('nonlinearity', 'lrelu'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            freeze_layers = kwarg('freeze_layers', 0),
        ),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 0),
            embed_features = kwarg('mapping_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.1),
        ),
        epilogue_kwargs = dnnlib.EasyDict(
            mbstd_group_size = kwarg('mbstd_group_size', None),
            mbstd_num_channels = kwarg('mbstd_num_features', 1),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )
    # Check for unknown kwargs ('structure' is recognized but ignored).
    kwarg('structure')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
    # Collect params. Progressive-growing pickles store per-lod FromRGB layers;
    # rename to the per-resolution scheme and force the 'orig' architecture.
    tf_params = _collect_tf_params(tf_D)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
        if match:
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
    # Convert params. Conv weights are transposed HWIO -> OIHW; dense weights
    # are transposed; resample filters are left at their PyTorch defaults.
    from training import networks
    D = networks.Discriminator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    _populate_module_params(D,
        r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
        r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
        r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
        r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
        r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
        r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(),
        r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
        r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(),
        r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
        r'.*\.resample_filter', None,
    )
    return D
#----------------------------------------------------------------------------
# CLI wrapper: loads a legacy pickle (TF or PyTorch) and re-saves it in the
# native PyTorch format. The docstring below doubles as the click --help text.
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
    """Convert legacy network pickle into the native PyTorch format.
    The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
    It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
    Example:
    \b
    python legacy.py \\
        --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
        --dest=stylegan2-cat-config-f.pkl
    """
    print(f'Loading "{source}"...')
    # open_url accepts both local paths and http(s) URLs.
    with dnnlib.util.open_url(source) as f:
        data = load_network_pkl(f, force_fp16=force_fp16)
    print(f'Saving "{dest}"...')
    with open(dest, 'wb') as f:
        pickle.dump(data, f)
    print('Done.')
#----------------------------------------------------------------------------
# Script entry point: delegate to the click command (click parses sys.argv).
if __name__ == "__main__":
    convert_network_pickle() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 16,502 | 50.411215 | 154 | py |
DFMGAN | DFMGAN-main/style_mixing.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate style mixing image matrix using pretrained network pickle."""
import os
import re
from typing import List
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    """Parse 'a-c' into the inclusive range [a..c], or 'a,b,c' into [a, b, c]."""
    bounds = re.fullmatch(r'(\d+)-(\d+)', s)
    if bounds is not None:
        lo, hi = int(bounds.group(1)), int(bounds.group(2))
        return list(range(lo, hi + 1))
    return [int(tok) for tok in s.split(',')]
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--rows', 'row_seeds', type=num_range, help='Random seeds to use for image rows', required=True)
@click.option('--cols', 'col_seeds', type=num_range, help='Random seeds to use for image columns', required=True)
@click.option('--styles', 'col_styles', type=num_range, help='Style layer range', default='0-6', show_default=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--outdir', type=str, required=True)
def generate_style_mix(
    network_pkl: str,
    row_seeds: List[int],
    col_seeds: List[int],
    col_styles: List[int],
    truncation_psi: float,
    noise_mode: str,
    outdir: str
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    python style_mixing.py --outdir=out --rows=85,100,75,458,1500 --cols=55,821,1789,293 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    os.makedirs(outdir, exist_ok=True)
    print('Generating W vectors...')
    # One W per unique seed; truncation is applied manually around w_avg.
    all_seeds = list(set(row_seeds + col_seeds))
    all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])
    all_w = G.mapping(torch.from_numpy(all_z).to(device), None)
    w_avg = G.mapping.w_avg
    all_w = w_avg + (all_w - w_avg) * truncation_psi
    w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))}
    print('Generating images...')
    # Un-mixed reference image for every seed, keyed as (seed, seed).
    all_images = G.synthesis(all_w, noise_mode=noise_mode)
    all_images = (all_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
    image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}
    print('Generating style-mixed images...')
    # Row seed provides the base W; the chosen style layers are overwritten
    # with the column seed's W before synthesis.
    for row_seed in row_seeds:
        for col_seed in col_seeds:
            w = w_dict[row_seed].clone()
            w[col_styles] = w_dict[col_seed][col_styles]
            image = G.synthesis(w[np.newaxis], noise_mode=noise_mode)
            image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            image_dict[(row_seed, col_seed)] = image[0].cpu().numpy()
    print('Saving images...')
    os.makedirs(outdir, exist_ok=True)
    for (row_seed, col_seed), image in image_dict.items():
        PIL.Image.fromarray(image, 'RGB').save(f'{outdir}/{row_seed}-{col_seed}.png')
    print('Saving image grid...')
    # Grid layout: first row/column hold the un-mixed reference images.
    W = G.img_resolution
    H = G.img_resolution
    canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')
    for row_idx, row_seed in enumerate([0] + row_seeds):
        for col_idx, col_seed in enumerate([0] + col_seeds):
            if row_idx == 0 and col_idx == 0:
                continue
            key = (row_seed, col_seed)
            if row_idx == 0:
                key = (col_seed, col_seed)
            if col_idx == 0:
                key = (row_seed, row_seed)
            canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))
    canvas.save(f'{outdir}/grid.png')
#----------------------------------------------------------------------------
# Script entry point: delegate to the click command (click parses sys.argv).
if __name__ == "__main__":
    generate_style_mix() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 4,891 | 40.109244 | 132 | py |
DFMGAN | DFMGAN-main/projector.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Project given image to the latent space of pretrained network pickle."""
import copy
import os
from time import perf_counter
import click
import imageio
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import dnnlib
import legacy
def project(
    G,
    target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
    *,
    num_steps = 1000,
    w_avg_samples = 10000,
    initial_learning_rate = 0.1,
    initial_noise_factor = 0.05,
    lr_rampdown_length = 0.25,
    lr_rampup_length = 0.05,
    noise_ramp_length = 0.75,
    regularize_noise_weight = 1e5,
    verbose = False,
    device: torch.device
):
    """Optimize a W latent (plus noise buffers) so that G reproduces `target`,
    minimizing VGG16-LPIPS distance. Returns a [num_steps, num_ws, w_dim]
    tensor with the W from every optimization step (last entry is the result).
    """
    assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)
    def logprint(*args):
        if verbose:
            print(*args)
    # Work on a frozen copy so the caller's G is never mutated.
    G = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore
    # Compute w stats.
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
    w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
    # Setup noise inputs (per-layer noise buffers are co-optimized).
    noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }
    # Load VGG16 feature detector.
    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    with dnnlib.util.open_url(url) as f:
        vgg16 = torch.jit.load(f).eval().to(device)
    # Features for target image.
    target_images = target.unsqueeze(0).to(device).to(torch.float32)
    if target_images.shape[2] > 256:
        # VGG was trained on small images; downsample before feature extraction.
        target_images = F.interpolate(target_images, size=(256, 256), mode='area')
    target_features = vgg16(target_images, resize_images=False, return_lpips=True)
    # Optimize a single W (broadcast to all layers) starting from w_avg.
    w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable
    w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)
    optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)
    # Init noise.
    for buf in noise_bufs.values():
        buf[:] = torch.randn_like(buf)
        buf.requires_grad = True
    for step in range(num_steps):
        # Learning rate schedule: cosine ramp-down with a short linear warm-up;
        # W-space exploration noise decays quadratically to zero.
        t = step / num_steps
        w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
        lr = initial_learning_rate * lr_ramp
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        # Synth images from opt_w.
        w_noise = torch.randn_like(w_opt) * w_noise_scale
        ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
        synth_images = G.synthesis(ws, noise_mode='const')
        # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
        synth_images = (synth_images + 1) * (255/2)
        if synth_images.shape[2] > 256:
            synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')
        # Features for synth images.
        synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
        dist = (target_features - synth_features).square().sum()
        # Noise regularization: penalize spatial autocorrelation at multiple
        # scales so noise buffers stay noise-like instead of encoding content.
        reg_loss = 0.0
        for v in noise_bufs.values():
            noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()
            while True:
                reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2
                reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2
                if noise.shape[2] <= 8:
                    break
                noise = F.avg_pool2d(noise, kernel_size=2)
        loss = dist + reg_loss * regularize_noise_weight
        # Step
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        logprint(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
        # Save projected W for each optimization step.
        w_out[step] = w_opt.detach()[0]
        # Normalize noise to zero mean / unit variance after each step.
        with torch.no_grad():
            for buf in noise_bufs.values():
                buf -= buf.mean()
                buf *= buf.square().mean().rsqrt()
    return w_out.repeat([1, G.mapping.num_ws, 1])
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--target', 'target_fname', help='Target image file to project to', required=True, metavar='FILE')
@click.option('--num-steps', help='Number of optimization steps', type=int, default=1000, show_default=True)
@click.option('--seed', help='Random seed', type=int, default=303, show_default=True)
@click.option('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)
@click.option('--outdir', help='Where to save the output images', required=True, metavar='DIR')
def run_projection(
    network_pkl: str,
    target_fname: str,
    outdir: str,
    save_video: bool,
    seed: int,
    num_steps: int
):
    """Project given image to the latent space of pretrained network pickle.
    Examples:
    \b
    python projector.py --outdir=out --target=~/mytargetimg.png \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Load networks.
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as fp:
        G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore
    # Load target image: center-crop to square, then resize to G's resolution.
    target_pil = PIL.Image.open(target_fname).convert('RGB')
    w, h = target_pil.size
    s = min(w, h)
    target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
    target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)
    target_uint8 = np.array(target_pil, dtype=np.uint8)
    # Optimize projection.
    start_time = perf_counter()
    projected_w_steps = project(
        G,
        target=torch.tensor(target_uint8.transpose([2, 0, 1]), device=device), # pylint: disable=not-callable
        num_steps=num_steps,
        device=device,
        verbose=True
    )
    print (f'Elapsed: {(perf_counter()-start_time):.1f} s')
    # Render debug output: optional video and projected image and W vector.
    os.makedirs(outdir, exist_ok=True)
    if save_video:
        video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')
        print (f'Saving optimization progress video "{outdir}/proj.mp4"')
        # One frame per optimization step: target on the left, synth on the right.
        for projected_w in projected_w_steps:
            synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
            synth_image = (synth_image + 1) * (255/2)
            synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
            video.append_data(np.concatenate([target_uint8, synth_image], axis=1))
        video.close()
    # Save final projected frame and W vector.
    target_pil.save(f'{outdir}/target.png')
    projected_w = projected_w_steps[-1]
    synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
    synth_image = (synth_image + 1) * (255/2)
    synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
    PIL.Image.fromarray(synth_image, 'RGB').save(f'{outdir}/proj.png')
    np.savez(f'{outdir}/projected_w.npz', w=projected_w.unsqueeze(0).cpu().numpy())
# Script entry point: delegate to the click command (click parses sys.argv).
if __name__ == "__main__":
    run_projection() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 8,990 | 41.211268 | 136 | py |
DFMGAN | DFMGAN-main/generate.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import os
import re
from typing import List, Optional
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from tqdm import tqdm
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    """Turn 'a-c' into [a, a+1, ..., c] (inclusive) or 'a,b,c' into [a, b, c]."""
    m = re.match(r'^(\d+)-(\d+)$', s)
    if m is None:
        return [int(v) for v in s.split(',')]
    start, stop = map(int, m.groups())
    return list(range(start, stop + 1))
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--projected-w', help='Projection result file', type=str, metavar='FILE')
@click.option('--output', help='Where to save the output images', type=str, metavar='FILE', default = None)
@click.option('--cmp', help='Generate images for comparison', type=bool, metavar='BOOL', is_flag=True)
@click.option('--gen-good', help='Generate good images along with images', type=bool, metavar='BOOL', is_flag=True)
@click.option('--gen-mask', help='Generate masks along with images', type=bool, metavar='BOOL', is_flag=True)
@click.option('--num', help='Total number of generated images. Only when --seeds is unspecified. [default: 10 for cmp mode, 100 otherwise]', type=int)
def generate_images(
    ctx: click.Context,
    network_pkl: str,
    seeds: Optional[List[int]],
    truncation_psi: float,
    noise_mode: str,
    output: str,
    class_idx: Optional[int],
    projected_w: Optional[str],
    cmp: bool,
    gen_mask: bool,
    gen_good: bool,
    num: int,
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    # Generate curated MetFaces images without truncation (Fig.10 left)
    python generate.py --outdir=out --trunc=1 --seeds=85,265,297,849 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    \b
    # Generate uncurated MetFaces images with truncation (Fig.12 upper left)
    python generate.py --outdir=out --trunc=0.7 --seeds=600-605 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    \b
    # Generate class conditional CIFAR-10 images (Fig.17 left, Car)
    python generate.py --outdir=out --seeds=0-35 --class=1 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/cifar10.pkl
    \b
    # Render an image from projected W
    python generate.py --outdir=out --projected_w=projected_w.npz \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    # Synthesize the result of a W projection.
    if projected_w is not None:
        if seeds is not None:
            print ('warn: --seeds is ignored when using --projected-w')
        if output is None:
            ctx.fail('--output must be specified when using --projected-w')
        print(f'Generating images from projected W "{projected_w}"')
        ws = np.load(projected_w)['w']
        ws = torch.tensor(ws, device=device) # pylint: disable=not-callable
        assert ws.shape[1:] == (G.num_ws, G.w_dim)
        os.makedirs(output, exist_ok=True)
        for idx, w in enumerate(ws):
            img = G.synthesis(w.unsqueeze(0), noise_mode=noise_mode)
            img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            # Bug fix: this previously saved to an undefined 'outdir' (NameError)
            # and rebound 'img' to the None returned by Image.save().
            PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{output}/proj{idx:02d}.png')
        return
    # Default seed list when --seeds is not given: 10 seeds in cmp mode,
    # otherwise 100 (or exactly --num when provided).
    if seeds is None:
        if num is None:
            seeds = [x for x in range(10)] if cmp else [x for x in range(100)]
        else:
            seeds = [x for x in range(num)]
    # Labels.
    label = torch.zeros([1, G.c_dim], device=device)
    if G.c_dim != 0:
        if class_idx is None:
            ctx.fail('Must specify class label with --class when using a conditional network')
        label[:, class_idx] = 1
    else:
        if class_idx is not None:
            print ('warn: --class=lbl ignored when running on an unconditional network')
    # Resolve the output location: cmp mode defaults to a grid PNG next to the
    # network pickle; per-image mode requires an explicit directory.
    if output is not None and (not output.endswith('.png')):
        os.makedirs(output, exist_ok=True)
    if cmp and output is None:
        assert network_pkl[-4:] == '.pkl'
        kimg = network_pkl[-10:-4]
        output = os.path.join(os.path.dirname(network_pkl), f'cmp{kimg}.png')
    if not cmp and output is None:
        print('--output must be specified when not using cmp mode')
        exit(1)
    if cmp:
        # Comparison mode: build one wide grid; for transfer models each column
        # stacks good image / mask / defect image vertically.
        canvas = []
        for seed_idx, seed in tqdm(enumerate(seeds)):
            z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
            transfer = (G.transfer != 'none') if hasattr(G, 'transfer') else False
            if transfer:
                defect_z = torch.from_numpy(np.random.RandomState(seed + len(seeds)).randn(1, G.z_dim)).to(device)
                ws = G.mapping(z, None)
                defect_ws = G.defect_mapping(defect_z, label, truncation_psi=truncation_psi)
                if G.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    img, mask = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = True, fix_residual_to_zero = False)
                    good_img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = False, fix_residual_to_zero = True)
                    mask = torch.where(mask >= 0.0, 1.0, -1.0)
                    img = torch.cat([good_img, mask.repeat((1, 3, 1, 1)), img], dim = 2)
                else:
                    # Bug fix: removed a 'mask = torch.where(...)' line that read
                    # 'mask' before assignment — this path produces no mask.
                    img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, fix_residual_to_zero = False)
                    good_img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, fix_residual_to_zero = True)
                    img = torch.cat([good_img, img], dim = 2)
            else:
                img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
            canvas.append(img)
        img = torch.cat(canvas, dim = 3)
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        if not output.endswith('.png'):
            output += '.png'
        PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{output}')
    else:
        # Per-image mode: save one PNG per seed, plus optional mask/good images.
        for seed_idx, seed in tqdm(enumerate(seeds)):
            z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
            transfer = (G.transfer != 'none') if hasattr(G, 'transfer') else False
            mask = None
            good_img = None
            if transfer:
                defect_z = torch.from_numpy(np.random.RandomState(seed + len(seeds)).randn(1, G.z_dim)).to(device)
                ws = G.mapping(z, None)
                defect_ws = G.defect_mapping(defect_z, label, truncation_psi=truncation_psi)
                if G.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    img, mask = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = True, fix_residual_to_zero = False)
                    if gen_good:
                        good_img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = False, fix_residual_to_zero = True)
                    mask = torch.where(mask >= 0.0, 1.0, -1.0).repeat(1, 3, 1, 1)
                else:
                    # Bug fix: removed 'mask = torch.where(mask ...)' which always
                    # failed here (mask is None on this path).
                    img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, fix_residual_to_zero = False)
            else:
                img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
            img = ((img.permute(0, 2, 3, 1) + 1.0) * 127.5).clamp(0, 255).to(torch.uint8)
            PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(os.path.join(output, '%d_img.png' % seed_idx))
            if gen_mask and (mask is not None):
                mask = ((mask.permute(0, 2, 3, 1) + 1.0) * 127.5).clamp(0, 255).to(torch.uint8)
                PIL.Image.fromarray(mask[0].cpu().numpy(), 'RGB').save(os.path.join(output, '%d_mask.png' % seed_idx))
            # Bug fix: guard on good_img — previously NameError when --gen-good
            # was used with a non-res_block transfer (or non-transfer) network.
            if gen_good and (good_img is not None):
                good = ((good_img.permute(0, 2, 3, 1) + 1.0) * 127.5).clamp(0, 255).to(torch.uint8)
                PIL.Image.fromarray(good[0].cpu().numpy(), 'RGB').save(os.path.join(output, '%d_good.png' % seed_idx))
#----------------------------------------------------------------------------
# Script entry point: delegate to the click command (click parses sys.argv).
if __name__ == "__main__":
    generate_images() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 10,000 | 45.300926 | 150 | py |
DFMGAN | DFMGAN-main/gen_gif_dfmgan.py | """Generate GIF using pretrained network pickle."""
import os
import click
import dnnlib
import numpy as np
from PIL import Image
import torch
import legacy
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seed', help='Random seed', default=0, type=int)
@click.option('--num', help='Number of samples', default=5, type=int)
@click.option('--resolution', help='Resolution of the output images', default=128, type=int)
@click.option('--num-phases', help='Number of phases', default=5, type=int)
@click.option('--transition-frames', help='Number of transition frames per phase', default=10, type=int)
@click.option('--static-frames', help='Number of static frames per phase', default=5, type=int)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--output', type=str)
@click.option('--fix-content', '--fc', help='Use fixed z_object', type=click.BOOL, default=False, is_flag = True)
@click.option('--cond', help = 'conditional, set a label or "all"', type=str, default = 'none')
def generate_gif(
    network_pkl: str,
    seed: int,
    num: int,
    resolution: int,
    num_phases: int,
    transition_frames: int,
    static_frames: int,
    truncation_psi: float,  # NOTE(review): accepted but never used in this body
    noise_mode: str,
    output: str,
    fix_content: bool,
    cond: str,
):
    """Render a looping GIF that interpolates a DFMGAN transfer model through
    `num_phases` random latent pairs, showing image / thresholded mask / raw
    mask rows for `num` samples side by side.

    Requires a transfer-mode pickle (G.transfer != 'none'); exits otherwise.
    When --output is omitted, the path is derived from the pickle's kimg tag.
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    transfer = (G.transfer != 'none')
    if not transfer:
        print('Must be a transfer model.')
        exit(1)
    if output is None:
        # Derive the output name from the pickle filename; the 6 chars before
        # '.pkl' are assumed to be the kimg counter of the snapshot.
        assert network_pkl[-4:] == '.pkl'
        kimg = network_pkl[-10:-4]
        output = os.path.join(os.path.dirname(network_pkl), f'itp{kimg}.gif' if not fix_content else f'itp{kimg}_fc.gif')
    outdir = os.path.dirname(output)
    if outdir:
        os.makedirs(outdir, exist_ok=True)
    np.random.seed(seed)
    output_seq = []
    if cond == 'all':
        # One sample per class label.
        num = G.c_dim
    batch_size = num
    latent_size = G.z_dim
    # Per-phase content latents; for cond == 'all' the same z is repeated over
    # the batch so only the class label varies across samples.
    latents = [np.random.randn(batch_size, latent_size) if cond != 'all' else np.random.randn(1, latent_size).repeat(batch_size, 0) for _ in range(num_phases)]
    if transfer:
        latents_defect = [np.random.randn(batch_size, latent_size) if cond != 'all' else np.random.randn(1, latent_size).repeat(batch_size, 0) for _ in range(num_phases)]
    if cond == 'all':
        num_c = G.c_dim
        # Identity matrix => row k is the one-hot label for class k.
        cond_list = [np.diag([1 for _ in range(num_c)]) for _ in range(num_phases)]
    elif cond != 'none':
        num_c = G.c_dim
        c_label = int(cond)
        c_npy = np.zeros(num_c)
        c_npy[c_label] = 1
        cond_list = [c_npy.reshape(1, -1).repeat(batch_size, 0) for _ in range(num_phases)]
    def to_image_grid(outputs):
        # Stack each batch horizontally, then the image/mask rows vertically.
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; needs
        # Image.LANCZOS on modern Pillow.
        canvas = []
        for output in outputs:
            output = np.reshape(output, [num, *output.shape[1:]])
            output = np.concatenate(output, axis=1)
            canvas.append(output)
        canvas = np.concatenate(canvas, axis = 0)
        return Image.fromarray(canvas).resize((resolution * num, resolution * len(outputs)), Image.ANTIALIAS)
    def transfer_generate(dlatents, defectlatents):
        # Synthesize images plus 1-channel masks; expand mask to RGB and also
        # produce a hard-thresholded version for visualization.
        images, masks = G.synthesis(dlatents, defectlatents, noise_mode=noise_mode, output_mask=True)
        masks = masks.repeat((1, 3, 1, 1))
        rounded_masks = masks.clone()
        rounded_masks[rounded_masks >= G.mask_threshold] = 1.0
        rounded_masks[rounded_masks < G.mask_threshold] = -1.0
        # [-1, 1] float -> [0, 255] uint8, NCHW -> NHWC.
        images = (images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        masks = (masks.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        rounded_masks = (rounded_masks.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        return to_image_grid([images, rounded_masks, masks])
    for i in range(num_phases):
        # i - 1 wraps to the last phase at i == 0, so the GIF loops seamlessly.
        dlatents0 = G.mapping(torch.from_numpy(latents[i - 1] if not fix_content else latents[0]).to(device), None)
        dlatents1 = G.mapping(torch.from_numpy(latents[i] if not fix_content else latents[0]).to(device), None)
        defectlatents0 = G.defect_mapping(torch.from_numpy(latents_defect[i - 1]).to(device), None if cond == 'none' else torch.from_numpy(cond_list[i - 1]).to(device))
        defectlatents1 = G.defect_mapping(torch.from_numpy(latents_defect[i]).to(device), None if cond == 'none' else torch.from_numpy(cond_list[i]).to(device))
        for j in range(transition_frames):
            # Linear interpolation in W space between consecutive phases.
            dlatents = (dlatents0 * (transition_frames - j) + dlatents1 * j) / transition_frames
            defectlatents = (defectlatents0 * (transition_frames - j) + defectlatents1 * j) / transition_frames
            output_seq.append(transfer_generate(dlatents, defectlatents))
        output_seq.extend([transfer_generate(dlatents, defectlatents1)] * static_frames)
    if not output.endswith('.gif'):
        output += '.gif'
    output_seq[0].save(output, save_all=True, append_images=output_seq[1:], optimize=True, duration=100, loop=0)
#----------------------------------------------------------------------------
# Script entry point: click parses sys.argv itself, hence the no-arg call.
if __name__ == "__main__":
    generate_gif() # pylint: disable=no-value-for-parameter

#----------------------------------------------------------------------------
| 5,711 | 41.947368 | 170 | py |
DFMGAN | DFMGAN-main/generate_gif.py | """Generate GIF using pretrained network pickle."""
import os
import click
import dnnlib
import numpy as np
from PIL import Image
import torch
import legacy
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seed', help='Random seed', default=0, type=int)
@click.option('--num-rows', help='Number of rows', default=2, type=int)
@click.option('--num-cols', help='Number of columns', default=2, type=int)
@click.option('--resolution', help='Resolution of the output images', default=128, type=int)
@click.option('--num-phases', help='Number of phases', default=5, type=int)
@click.option('--transition-frames', help='Number of transition frames per phase', default=20, type=int)
@click.option('--static-frames', help='Number of static frames per phase', default=5, type=int)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--output', type=str, required=True)
# DFMGAN args
@click.option('--latent-mode', help='randomly sampled latent codes', type=click.Choice(['both', 'content', 'defect', 'nores', 'none']), default='both', show_default=True)
def generate_gif(
    network_pkl: str,
    seed: int,
    num_rows: int,
    num_cols: int,
    resolution: int,
    num_phases: int,
    transition_frames: int,
    static_frames: int,
    truncation_psi: float,  # NOTE(review): accepted but never used in this body
    noise_mode: str,
    output: str,
    latent_mode: str,
):
    """Generate gif using pretrained network pickle.

    Works for both plain StyleGAN2 pickles and DFMGAN transfer pickles; for
    the latter, --latent-mode controls which latent (content/defect) is
    interpolated and whether the defect residual is zeroed ('nores').

    Examples:

    \b
    python generate_gif.py --output=obama.gif --seed=0 --num-rows=1 --num-cols=8 \\
        --network=https://hanlab.mit.edu/projects/data-efficient-gans/models/DiffAugment-stylegan2-100-shot-obama.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    transfer = (G.transfer != 'none')
    outdir = os.path.dirname(output)
    if outdir:
        os.makedirs(outdir, exist_ok=True)
    np.random.seed(seed)
    output_seq = []
    batch_size = num_rows * num_cols
    latent_size = G.z_dim
    # One batch of random z per interpolation phase.
    latents = [np.random.randn(batch_size, latent_size) for _ in range(num_phases)]
    if transfer:
        latents_defect = [np.random.randn(batch_size, latent_size) for _ in range(num_phases)]
    def to_image_grid(outputs):
        # Arrange the batch into a num_rows x num_cols grid.
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; needs
        # Image.LANCZOS on modern Pillow.
        outputs = np.reshape(outputs, [num_rows, num_cols, *outputs.shape[1:]])
        outputs = np.concatenate(outputs, axis=1)
        outputs = np.concatenate(outputs, axis=1)
        return Image.fromarray(outputs).resize((resolution * num_cols, resolution * num_rows), Image.ANTIALIAS)
    def generate(dlatents):
        # Plain (non-transfer) synthesis path.
        images = G.synthesis(dlatents, noise_mode=noise_mode)
        images = (images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        return to_image_grid(images)
    def transfer_generate(dlatents, defectlatents):
        # Transfer synthesis; 'nores' zeroes the defect residual branch.
        images = G.synthesis(dlatents, defectlatents, noise_mode=noise_mode) if latent_mode != 'nores' else G.synthesis(dlatents, defectlatents, noise_mode=noise_mode, fix_residual_to_zero = True)
        images = (images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        return to_image_grid(images)
    for i in range(num_phases):
        # i - 1 wraps to the last phase at i == 0, so the GIF loops seamlessly.
        # Latent-mode choices pin either the content or the defect latent to
        # phase 0, freezing that factor across the animation.
        dlatents0 = G.mapping(torch.from_numpy(latents[i - 1] if latent_mode not in ['defect', 'none'] else latents[0]).to(device), None)
        dlatents1 = G.mapping(torch.from_numpy(latents[i] if latent_mode not in ['defect', 'none'] else latents[0]).to(device), None)
        if transfer:
            defectlatents0 = G.defect_mapping(torch.from_numpy(latents_defect[i - 1] if latent_mode not in ['content', 'none'] else latents_defect[0]).to(device), None)
            defectlatents1 = G.defect_mapping(torch.from_numpy(latents_defect[i] if latent_mode not in ['content', 'none'] else latents_defect[0]).to(device), None)
        for j in range(transition_frames):
            dlatents = (dlatents0 * (transition_frames - j) + dlatents1 * j) / transition_frames
            if transfer:
                defectlatents = (defectlatents0 * (transition_frames - j) + defectlatents1 * j) / transition_frames
                output_seq.append(transfer_generate(dlatents, defectlatents))
            else:
                output_seq.append(generate(dlatents))
        if transfer:
            output_seq.extend([transfer_generate(dlatents1, defectlatents1)] * static_frames)
        else:
            output_seq.extend([generate(dlatents1)] * static_frames)
    if not output.endswith('.gif'):
        output += '.gif'
    output_seq[0].save(output, save_all=True, append_images=output_seq[1:], optimize=False, duration=50, loop=0)
#----------------------------------------------------------------------------
# Script entry point: click parses sys.argv itself, hence the no-arg call.
if __name__ == "__main__":
    generate_gif() # pylint: disable=no-value-for-parameter

#----------------------------------------------------------------------------
| 5,303 | 43.571429 | 196 | py |
DFMGAN | DFMGAN-main/dataset_tool.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import functools
import io
import json
import os
import pickle
import sys
import tarfile
import gzip
import zipfile
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import click
import numpy as np
import PIL.Image, cv2
from tqdm import tqdm
#----------------------------------------------------------------------------
def error(msg):
    """Print an error message and abort the process with exit status 1."""
    # Diagnostics belong on stderr so that piped/redirected stdout stays clean.
    print('Error: ' + msg, file=sys.stderr)
    sys.exit(1)
#----------------------------------------------------------------------------
def maybe_min(a: int, b: Optional[int]) -> int:
    """Return min(a, b), treating b=None as "no limit" (i.e. just a)."""
    return a if b is None else min(a, b)
#----------------------------------------------------------------------------
def file_ext(name: Union[str, Path]) -> str:
    """Return the text after the last '.' (the whole name if there is no dot)."""
    return str(name).rsplit('.', 1)[-1]
#----------------------------------------------------------------------------
def is_image_ext(fname: Union[str, Path]) -> bool:
    """True when fname's extension is one PIL can decode.

    PIL.Image.init() must have been called so that PIL.Image.EXTENSION is
    populated (convert_dataset does this at startup).
    """
    return '.' + file_ext(fname).lower() in PIL.Image.EXTENSION # type: ignore
#----------------------------------------------------------------------------
def open_image_folder(source_dir, *, max_images: Optional[int]):
    """Open a directory of images.

    Returns (max_idx, generator) where the generator yields dicts with keys
    'img' (HWC uint8-ish numpy array as loaded by PIL), 'label' (from an
    optional dataset.json, else None) and 'img_name' (basename of the file).
    """
    # Recursively collect every file whose extension PIL recognizes.
    input_images = [str(f) for f in sorted(Path(source_dir).rglob('*')) if is_image_ext(f) and os.path.isfile(f)]

    # Load labels keyed by archive-relative path, if dataset.json exists.
    labels = {}
    meta_fname = os.path.join(source_dir, 'dataset.json')
    if os.path.isfile(meta_fname):
        with open(meta_fname, 'r') as file:
            labels = json.load(file)['labels']
            if labels is not None:
                labels = { x[0]: x[1] for x in labels }
            else:
                labels = {}

    max_idx = maybe_min(len(input_images), max_images)

    def iterate_images():
        for idx, fname in enumerate(input_images):
            # Normalize path separators so lookups match dataset.json keys.
            arch_fname = os.path.relpath(fname, source_dir)
            arch_fname = arch_fname.replace('\\', '/')
            img = np.array(PIL.Image.open(fname))
            yield dict(img=img, label=labels.get(arch_fname), img_name = os.path.basename(fname))
            if idx >= max_idx-1:
                break

    return max_idx, iterate_images()
#----------------------------------------------------------------------------
def open_image_zip(source, *, max_images: Optional[int]):
    """Open a zip archive of images.

    Returns (max_idx, generator); the generator yields dicts with keys
    'img' (numpy array) and 'label' (from an optional dataset.json, else None).
    """
    with zipfile.ZipFile(source, mode='r') as z:
        input_images = [str(f) for f in sorted(z.namelist()) if is_image_ext(f)]

        # Load labels keyed by archive member name, if dataset.json exists.
        labels = {}
        if 'dataset.json' in z.namelist():
            with z.open('dataset.json', 'r') as file:
                labels = json.load(file)['labels']
                if labels is not None:
                    labels = { x[0]: x[1] for x in labels }
                else:
                    labels = {}

    max_idx = maybe_min(len(input_images), max_images)

    def iterate_images():
        # Re-open the archive so the generator owns its own handle.
        with zipfile.ZipFile(source, mode='r') as z:
            for idx, fname in enumerate(input_images):
                with z.open(fname, 'r') as file:
                    img = PIL.Image.open(file) # type: ignore
                    img = np.array(img)
                yield dict(img=img, label=labels.get(fname))
                if idx >= max_idx-1:
                    break

    return max_idx, iterate_images()
#----------------------------------------------------------------------------
def open_lmdb(lmdb_dir: str, *, max_images: Optional[int]):
    """Open an LSUN-style LMDB database of encoded images.

    Returns (max_idx, generator); the generator yields dicts with keys
    'img' (RGB numpy array) and 'label' (always None). Records that fail to
    decode are skipped with a message rather than aborting the conversion.
    """
    import cv2 # pip install opencv-python
    import lmdb # pip install lmdb # pylint: disable=import-error

    with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn:
        max_idx = maybe_min(txn.stat()['entries'], max_images)

    def iterate_images():
        with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn:
            for idx, (_key, value) in enumerate(txn.cursor()):
                try:
                    try:
                        # Prefer the fast cv2 decoder; fall back to PIL.
                        img = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), 1)
                        if img is None:
                            raise IOError('cv2.imdecode failed')
                        img = img[:, :, ::-1] # BGR => RGB
                    except IOError:
                        img = np.array(PIL.Image.open(io.BytesIO(value)))
                    yield dict(img=img, label=None)
                    if idx >= max_idx-1:
                        break
                except Exception:
                    # Best-effort skip of corrupt records. Was a bare `except:`,
                    # which also swallowed KeyboardInterrupt/SystemExit.
                    print(sys.exc_info()[1])

    return max_idx, iterate_images()
#----------------------------------------------------------------------------
def open_cifar10(tarball: str, *, max_images: Optional[int]):
    """Open the CIFAR-10 python tarball (cifar-10-python.tar.gz).

    Returns (max_idx, generator); the generator yields dicts with keys
    'img' (32x32x3 uint8 array) and 'label' (int class id 0-9).
    """
    images = []
    labels = []

    with tarfile.open(tarball, 'r:gz') as tar:
        for batch in range(1, 6):
            member = tar.getmember(f'cifar-10-batches-py/data_batch_{batch}')
            with tar.extractfile(member) as file:
                # latin1 keeps the py2-era pickle byte strings intact.
                data = pickle.load(file, encoding='latin1')
            images.append(data['data'].reshape(-1, 3, 32, 32))
            labels.append(data['labels'])

    images = np.concatenate(images)
    labels = np.concatenate(labels)
    images = images.transpose([0, 2, 3, 1]) # NCHW -> NHWC

    # Sanity-check the well-known CIFAR-10 training set layout.
    assert images.shape == (50000, 32, 32, 3) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype in [np.int32, np.int64]
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9

    max_idx = maybe_min(len(images), max_images)

    def iterate_images():
        for idx, img in enumerate(images):
            yield dict(img=img, label=int(labels[idx]))
            if idx >= max_idx-1:
                break

    return max_idx, iterate_images()
#----------------------------------------------------------------------------
def open_mnist(images_gz: str, *, max_images: Optional[int]):
    """Open the MNIST training set (train-images-idx3-ubyte.gz).

    The labels file is located by filename substitution next to the images
    file. Images are zero-padded from 28x28 to 32x32 (power-of-two size).
    Returns (max_idx, generator) yielding dicts with 'img' and 'label'.
    """
    labels_gz = images_gz.replace('-images-idx3-ubyte.gz', '-labels-idx1-ubyte.gz')
    assert labels_gz != images_gz
    images = []
    labels = []

    # IDX format: skip the 16-byte (images) / 8-byte (labels) headers.
    with gzip.open(images_gz, 'rb') as f:
        images = np.frombuffer(f.read(), np.uint8, offset=16)
    with gzip.open(labels_gz, 'rb') as f:
        labels = np.frombuffer(f.read(), np.uint8, offset=8)

    images = images.reshape(-1, 28, 28)
    images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)

    # Sanity-check the well-known MNIST training set layout.
    assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9

    max_idx = maybe_min(len(images), max_images)

    def iterate_images():
        for idx, img in enumerate(images):
            yield dict(img=img, label=int(labels[idx]))
            if idx >= max_idx-1:
                break

    return max_idx, iterate_images()
#----------------------------------------------------------------------------
def make_transform(
    transform: Optional[str],
    output_width: Optional[int],
    output_height: Optional[int],
    resize_filter: str
) -> Callable[[np.ndarray], Optional[np.ndarray]]:
    """Build an image-transform callable for the conversion loop.

    transform: None (scale only), 'center-crop', or 'center-crop-wide'.
    resize_filter: 'box' / 'lanczos' (PIL) or 'cv2_inter_cubic' (OpenCV).
    The returned callable maps an HWC numpy image to a transformed image,
    or to None when the transform drops the image (too small for
    center-crop-wide).
    """
    resample = { 'box': PIL.Image.BOX, 'lanczos': PIL.Image.LANCZOS, 'cv2_inter_cubic': cv2.INTER_CUBIC }[resize_filter]

    def resize(width, height, img):
        # Dispatch to OpenCV or PIL depending on the selected filter.
        if resize_filter == 'cv2_inter_cubic':
            img = cv2.resize(img, (width, height), interpolation = resample)
            return img
        else:
            img = PIL.Image.fromarray(img)
            img = img.resize((width, height), resample)
            return np.array(img)

    def scale(width, height, img):
        w = img.shape[1]
        h = img.shape[0]
        if width == w and height == h:
            return img
        # None for either dimension means "keep the input size".
        ww = width if width is not None else w
        hh = height if height is not None else h
        return resize(ww, hh, img)

    def center_crop(width, height, img):
        # Crop the largest centered square, then resize to the target.
        crop = np.min(img.shape[:2])
        img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
        return resize(width, height, img)

    def center_crop_wide(width, height, img):
        # Letterbox: crop to the target aspect ratio, resize, then pad to a
        # width x width square canvas. Drops images that are too small.
        ch = int(np.round(width * img.shape[0] / img.shape[1]))
        if img.shape[1] < width or ch < height:
            return None
        img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2]
        img = resize(width, height, img)
        canvas = np.zeros([width, width, 3], dtype=np.uint8)
        canvas[(width - height) // 2 : (width + height) // 2, :] = img
        return canvas

    if transform is None:
        return functools.partial(scale, output_width, output_height)
    if transform == 'center-crop':
        if (output_width is None) or (output_height is None):
            # Fixed: message previously read "...using center-croptransform"
            # (missing space before 'transform').
            error('must specify --width and --height when using ' + transform + ' transform')
        return functools.partial(center_crop, output_width, output_height)
    if transform == 'center-crop-wide':
        if (output_width is None) or (output_height is None):
            error('must specify --width and --height when using ' + transform + ' transform')
        return functools.partial(center_crop_wide, output_width, output_height)
    assert False, 'unknown transform'
#----------------------------------------------------------------------------
def open_dataset(source, *, max_images: Optional[int]):
    """Dispatch to the loader that matches the input path's type and format."""
    if os.path.isdir(source):
        # LSUN-style LMDB databases follow the *_lmdb directory convention.
        if source.rstrip('/').endswith('_lmdb'):
            return open_lmdb(source, max_images=max_images)
        return open_image_folder(source, max_images=max_images)

    if os.path.isfile(source):
        basename = os.path.basename(source)
        if basename == 'cifar-10-python.tar.gz':
            return open_cifar10(source, max_images=max_images)
        if basename == 'train-images-idx3-ubyte.gz':
            return open_mnist(source, max_images=max_images)
        if file_ext(source) == 'zip':
            return open_image_zip(source, max_images=max_images)
        assert False, 'unknown archive type'

    error(f'Missing input file or directory: {source}')
#----------------------------------------------------------------------------
def open_dest(dest: str) -> Tuple[str, Callable[[str, Union[bytes, str]], None], Callable[[], None]]:
    """Prepare the output destination (zip archive or folder).

    Returns a triple (archive_root_dir, save_bytes, close):
    - archive_root_dir: prefix callers join with each archive filename
      ('' for zip output, the folder path for folder output);
    - save_bytes(fname, data): writes one member/file (str data is utf-8
      encoded);
    - close(): finalizes the destination (no-op for folders).
    """
    if file_ext(dest) == 'zip':
        parent = os.path.dirname(dest)
        if parent != '':
            os.makedirs(parent, exist_ok=True)
        archive = zipfile.ZipFile(file=dest, mode='w', compression=zipfile.ZIP_STORED)

        def write_to_zip(fname: str, data: Union[bytes, str]):
            archive.writestr(fname, data)

        return '', write_to_zip, archive.close

    # Folder output. Refuse a pre-existing, non-empty destination; creating
    # the folder up front surfaces permission problems before conversion
    # starts (write_to_folder would mkdir lazily anyway).
    if os.path.isdir(dest) and len(os.listdir(dest)) != 0:
        error('--dest folder must be empty')
    os.makedirs(dest, exist_ok=True)

    def write_to_folder(fname: str, data: Union[bytes, str]):
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        with open(fname, 'wb') as fout:
            fout.write(data.encode('utf8') if isinstance(data, str) else data)

    return dest, write_to_folder, lambda: None
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--source', help='Directory or archive name for input dataset', required=True, metavar='PATH')
@click.option('--dest', help='Output directory or archive name for output dataset', required=True, metavar='PATH')
@click.option('--max-images', help='Output only up to `max-images` images', type=int, default=None)
@click.option('--resize-filter', help='Filter to use when resizing images for output resolution', type=click.Choice(['box', 'lanczos', 'cv2_inter_cubic']), default='cv2_inter_cubic', show_default=True)
@click.option('--transform', help='Input crop/resize mode', type=click.Choice(['center-crop', 'center-crop-wide']))
@click.option('--width', help='Output width', type=int)
@click.option('--height', help='Output height', type=int)
@click.option('--source-mask', help = 'Directory for masks', metavar = 'PATH', default = None)
def convert_dataset(
    ctx: click.Context,
    source: str,
    dest: str,
    max_images: Optional[int],
    transform: Optional[str],
    resize_filter: str,
    width: Optional[int],
    height: Optional[int],
    source_mask: str,
):
    """Convert an image dataset into a dataset archive usable with StyleGAN2 ADA PyTorch.

    The input dataset format is guessed from the --source argument:

    \b
    --source *_lmdb/                    Load LSUN dataset
    --source cifar-10-python.tar.gz     Load CIFAR-10 dataset
    --source train-images-idx3-ubyte.gz Load MNIST dataset
    --source path/                      Recursively load all images from path/
    --source dataset.zip                Recursively load all images from dataset.zip

    Specifying the output format and path:

    \b
    --dest /path/to/dir                 Save output files under /path/to/dir
    --dest /path/to/dataset.zip         Save output files into /path/to/dataset.zip

    The output dataset format can be either an image folder or an uncompressed zip archive.
    Zip archives makes it easier to move datasets around file servers and clusters, and may
    offer better training performance on network file systems.

    Images within the dataset archive will be stored as uncompressed PNG.
    Uncompresed PNGs can be efficiently decoded in the training loop.

    Class labels are stored in a file called 'dataset.json' that is stored at the
    dataset root folder. This file has the following structure:

    \b
    {
        "labels": [
            ["00000/img00000000.png",6],
            ["00000/img00000001.png",9],
            ... repeated for every image in the datase
            ["00049/img00049999.png",1]
        ]
    }

    If the 'dataset.json' file cannot be found, the dataset is interpreted as
    not containing class labels.

    Image scale/crop and resolution requirements:

    Output images must be square-shaped and they must all have the same power-of-two
    dimensions.

    To scale arbitrary input image size to a specific width and height, use the
    --width and --height options. Output resolution will be either the original
    input resolution (if --width/--height was not specified) or the one specified with
    --width/height.

    Use the --transform=center-crop or --transform=center-crop-wide options to apply a
    center crop transform on the input image. These options should be used with the
    --width and --height options. For example:

    \b
    python dataset_tool.py --source LSUN/raw/cat_lmdb --dest /tmp/lsun_cat \\
        --transform=center-crop-wide --width 512 --height=384

    DFMGAN extension: when --source-mask is given, each image's defect mask
    ('<name>_mask.<ext>' in that directory) is binarized and stacked as a 4th
    channel, and samples are saved as .npy instead of .png.
    """
    # Populate PIL.Image.EXTENSION so is_image_ext() works.
    PIL.Image.init() # type: ignore

    if dest == '':
        ctx.fail('--dest output filename or directory must not be an empty string')

    num_files, input_iter = open_dataset(source, max_images=max_images)
    archive_root_dir, save_bytes, close_dest = open_dest(dest)
    transform_image = make_transform(transform, width, height, resize_filter)

    dataset_attrs = None

    labels = []
    for idx, image in tqdm(enumerate(input_iter), total=num_files):
        idx_str = f'{idx:08d}'
        # Images are sharded into folders of 100k; .npy when a mask channel is
        # appended, .png otherwise.
        archive_fname = f'{idx_str[:5]}/img{idx_str}' + ('.npy' if source_mask is not None else '.png')

        # Apply crop and resize.
        img = transform_image(image['img'])

        # Transform may drop images.
        if img is None:
            continue

        # Error check to require uniform image attributes across
        # the whole dataset.
        channels = img.shape[2] if img.ndim == 3 else 1
        cur_image_attrs = {
            'width': img.shape[1],
            'height': img.shape[0],
            'channels': channels
        }
        if dataset_attrs is None:
            # First image fixes the attributes; validate them once here.
            dataset_attrs = cur_image_attrs
            width = dataset_attrs['width']
            height = dataset_attrs['height']
            if width != height:
                error(f'Image dimensions after scale and crop are required to be square. Got {width}x{height}')
            if dataset_attrs['channels'] not in [1, 3]:
                error('Input images must be stored as RGB or grayscale')
            if width != 2 ** int(np.floor(np.log2(width))):
                error('Image width/height after scale and crop are required to be power-of-two')
        elif dataset_attrs != cur_image_attrs:
            err = [f' dataset {k}/cur image {k}: {dataset_attrs[k]}/{cur_image_attrs[k]}' for k in dataset_attrs.keys()]
            error(f'Image {archive_fname} attributes must be equal across all images of the dataset. Got:\n' + '\n'.join(err))

        image_bits = io.BytesIO()
        # Load the corresponding mask
        if source_mask is not None:
            # Mask path derives from the image name: '<name>_mask.<ext>'.
            img_idx, img_ext = os.path.splitext(image['img_name'])
            mask = np.array(PIL.Image.open(os.path.join(source_mask, f'{img_idx}_mask{img_ext}')))
            mask = transform_image(mask)
            # Binarize the (possibly resampled) mask to {0, 255}.
            mask[mask >= 127.5] = 255
            mask[mask < 127.5] = 0
            img = np.concatenate((img, np.expand_dims(mask, axis = -1)), axis = 2)
            assert img.shape == (height, width, 4)
            np.save(image_bits, img)
        else:
            # Save the image as an uncompressed PNG.
            img = PIL.Image.fromarray(img, { 1: 'L', 3: 'RGB' }[channels])
            img.save(image_bits, format='png', compress_level=0, optimize=False)
        save_bytes(os.path.join(archive_root_dir, archive_fname), image_bits.getbuffer())
        labels.append([archive_fname, image['label']] if image['label'] is not None else None)

    # Emit labels only when every image had one; otherwise mark as unlabeled.
    metadata = {
        'labels': labels if all(x is not None for x in labels) else None
    }
    save_bytes(os.path.join(archive_root_dir, 'dataset.json'), json.dumps(metadata))
    close_dest()
#----------------------------------------------------------------------------
# Script entry point: click parses sys.argv itself, hence the no-arg call.
if __name__ == "__main__":
    convert_dataset() # pylint: disable=no-value-for-parameter
| 19,055 | 39.372881 | 201 | py |
DFMGAN | DFMGAN-main/train.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Training Generative Adversarial Networks with Limited Data"."""
import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
#----------------------------------------------------------------------------
class UserError(Exception):
    """Raised when command-line arguments fail validation."""
#----------------------------------------------------------------------------
def setup_training_loop_kwargs(
    # General options (not included in desc).
    gpus       = None, # Number of GPUs: <int>, default = 1 gpu
    snap       = None, # Snapshot interval: <int>, default = 50 ticks
    metrics    = None, # List of metric names: [], ['fid50k_full'] (default), ...
    seed       = None, # Random seed: <int>, default = 0

    # Dataset.
    data       = None, # Training dataset (required): <path>
    cond       = None, # Train conditional model based on dataset labels: <bool>, default = False
    subset     = None, # Train with only N images: <int>, default = all
    mirror     = None, # Augment dataset with x-flips: <bool>, default = False

    # Base config.
    cfg        = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar'
    gamma      = None, # Override R1 gamma: <float>
    kimg       = None, # Override training duration: <int>
    batch      = None, # Override batch size: <int>

    # Discriminator augmentation.
    aug        = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'
    p          = None, # Specify p for 'fixed' (required): <float>
    target     = None, # Override ADA target for 'ada': <float>, default = depends on aug
    augpipe    = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'

    # Transfer learning.
    resume     = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>
    freezed    = None, # Freeze-D: <int>, default = 0 discriminator layers

    # Performance options (not included in desc).
    fp32       = None, # Disable mixed-precision training: <bool>, default = False
    nhwc       = None, # Use NHWC memory format with FP16: <bool>, default = False
    allow_tf32 = None, # Allow PyTorch to use TF32 for matmul and convolutions: <bool>, default = False
    nobench    = None, # Disable cuDNN benchmarking: <bool>, default = False
    workers    = None, # Override number of DataLoader workers: <int>, default = 3

    # DFMGAN args.
    ft             = None, # Finetune mode: 'default', 'ft_map', 'ft_syn', 'ft_syn_2', 'ft_map_syn_2'
    transfer       = None, # Extra network for transfer learning: 'none' (default), 'dual_mod', 'res_block', ...
    res_st         = None, # Starting resolution for the defect ResBlock: <int-like str>, default = 64
    uni_st         = None, # Starting resolution for the unified block of D: <int-like str>, default = 64
    mask_threshold = None, # Threshold between mask/non-mask regions: <float>, default = 0.0
    lambda_match   = None, # Weight of the matching-discriminator loss: <float>, default = 1.0
    mode_seek      = None, # Mode-seeking loss variant: 'none', 'w/mask', 'w/img', 'z/mask'
    lambda_ms      = None, # Weight of the mode-seeking loss: <float>, default = 0.1
    no_round       = None, # Use a soft (non-rounded) mask: <bool>, default = False
    tanh_k         = None, # mask = tanh(k * raw_mask): <float>, default here = 1.0
    tanh_mask      = None, # Where to apply tanh() to the mask: 'none' (default here), 'late'
    dmatch_scale   = None, # D_match 'channel_base/channel_max' override: <str>
):
    """Validate raw CLI options and assemble kwargs for training_loop().

    Every parameter arrives as the raw (possibly None) click value; this
    function fills in defaults, validates ranges, and records everything in a
    dnnlib.EasyDict.

    Returns:
        (desc, args): `desc` is a short human-readable run description used to
        name the output directory; `args` holds the keyword arguments for
        training.training_loop.training_loop().

    Raises:
        UserError: when any option fails validation.
    """
    args = dnnlib.EasyDict()

    # ------------------------------------------
    # General options: gpus, snap, metrics, seed
    # ------------------------------------------

    if gpus is None:
        gpus = 1
    assert isinstance(gpus, int)
    if not (gpus >= 1 and gpus & (gpus - 1) == 0):
        raise UserError('--gpus must be a power of two')
    args.num_gpus = gpus

    if snap is None:
        snap = 50
    assert isinstance(snap, int)
    if snap < 1:
        raise UserError('--snap must be at least 1')
    args.image_snapshot_ticks = snap
    args.network_snapshot_ticks = snap

    if metrics is None:
        # Transfer runs default to the few-shot metric suite; plain runs keep the ADA default.
        if transfer is not None:
            metrics = ['fid5k_full', 'kid5k_full', 'clpips1k']
        else:
            metrics = ['fid50k_full']
    assert isinstance(metrics, list)
    if not all(metric_main.is_valid_metric(metric) for metric in metrics):
        raise UserError('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
    args.metrics = metrics

    if seed is None:
        seed = 0
    assert isinstance(seed, int)
    args.random_seed = seed

    # -----------------------------------
    # Dataset: data, cond, subset, mirror
    # -----------------------------------

    assert data is not None
    assert isinstance(data, str)
    args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
    args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
    try:
        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
        args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
        args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels
        args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size
        desc = training_set.name
        del training_set # conserve memory
    except IOError as err:
        raise UserError(f'--data: {err}')

    if cond is None:
        cond = False
    assert isinstance(cond, bool)
    if cond:
        if not args.training_set_kwargs.use_labels:
            raise UserError('--cond=True requires labels specified in dataset.json')
        desc += '-cond'
    else:
        args.training_set_kwargs.use_labels = False

    if subset is not None:
        assert isinstance(subset, int)
        if not 1 <= subset <= args.training_set_kwargs.max_size:
            raise UserError(f'--subset must be between 1 and {args.training_set_kwargs.max_size}')
        desc += f'-subset{subset}'
        if subset < args.training_set_kwargs.max_size:
            args.training_set_kwargs.max_size = subset
            args.training_set_kwargs.random_seed = args.random_seed

    if mirror is None:
        mirror = False
    assert isinstance(mirror, bool)
    if mirror:
        desc += '-mirror'
        args.training_set_kwargs.xflip = True

    # ------------------------------------
    # Base config: cfg, gamma, kimg, batch
    # ------------------------------------

    if cfg is None:
        cfg = 'auto'
    assert isinstance(cfg, str)
    desc += f'-{cfg}'

    cfg_specs = {
        'auto':      dict(ref_gpus=-1, kimg=25000,  mb=-1, mbstd=-1, fmaps=-1,  lrate=-1,     gamma=-1,   ema=-1,  ramp=0.05, map=2), # Populated dynamically based on resolution and GPU count.
        'stylegan2': dict(ref_gpus=8,  kimg=25000,  mb=32, mbstd=4,  fmaps=1,   lrate=0.002,  gamma=10,   ema=10,  ramp=None, map=8), # Uses mixed-precision, unlike the original StyleGAN2.
        'paper256':  dict(ref_gpus=8,  kimg=25000,  mb=64, mbstd=8,  fmaps=0.5, lrate=0.0025, gamma=1,    ema=20,  ramp=None, map=8),
        'paper512':  dict(ref_gpus=8,  kimg=25000,  mb=64, mbstd=8,  fmaps=1,   lrate=0.0025, gamma=0.5,  ema=20,  ramp=None, map=8),
        'paper1024': dict(ref_gpus=8,  kimg=25000,  mb=32, mbstd=4,  fmaps=1,   lrate=0.002,  gamma=2,    ema=10,  ramp=None, map=8),
        'cifar':     dict(ref_gpus=2,  kimg=100000, mb=64, mbstd=32, fmaps=1,   lrate=0.0025, gamma=0.01, ema=500, ramp=0.05, map=2),
    }

    assert cfg in cfg_specs
    spec = dnnlib.EasyDict(cfg_specs[cfg])
    if cfg == 'auto':
        desc += f'{gpus:d}'
        spec.ref_gpus = gpus
        res = args.training_set_kwargs.resolution
        spec.mb = max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay
        spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
        spec.fmaps = 1 if res >= 512 else 0.5
        spec.lrate = 0.002 if res >= 1024 else 0.0025
        spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula
        spec.ema = spec.mb * 10 / 32

    args.G_kwargs = dnnlib.EasyDict(class_name='training.networks.Generator', z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict())
    # 'res_block_uni_dis' swaps in a discriminator with a unified (shared) block.
    target_D_class = 'training.networks.%s' % ('DiscriminatorUnified' if transfer == 'res_block_uni_dis' else 'Discriminator')
    args.D_kwargs = dnnlib.EasyDict(class_name=target_D_class, block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
    args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(spec.fmaps * 32768)
    args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512
    args.G_kwargs.mapping_kwargs.num_layers = spec.map
    args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training
    args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow
    args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd

    if transfer == 'res_block_match_dis':
        # Second ("matching") discriminator that judges image+mask pairs.
        args.D_match_kwargs = dnnlib.EasyDict(class_name='training.networks.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
        if dmatch_scale is None:
            dmatch_base = int(spec.fmaps * 32768) # e.g. 16384 (= 16 * 1024)
            dmatch_max = 512
        else:
            dmatch_base = int(dmatch_scale.split('/')[0])
            dmatch_max = int(dmatch_scale.split('/')[1])
        args.D_match_kwargs.channel_base = dmatch_base
        args.D_match_kwargs.channel_max = dmatch_max
        args.D_match_kwargs.num_fp16_res = 4
        args.D_match_kwargs.conv_clamp = 256
        args.D_match_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd

    args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    if transfer == 'res_block_match_dis':
        args.D_match_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)

    args.total_kimg = spec.kimg
    args.batch_size = spec.mb
    args.batch_gpu = spec.mb // spec.ref_gpus
    args.ema_kimg = spec.ema
    args.ema_rampup = spec.ramp

    if cfg == 'cifar':
        args.loss_kwargs.pl_weight = 0 # disable path length regularization
        args.loss_kwargs.style_mixing_prob = 0 # disable style mixing
        args.D_kwargs.architecture = 'orig' # disable residual skip connections

    if gamma is not None:
        assert isinstance(gamma, float)
        if not gamma >= 0:
            raise UserError('--gamma must be non-negative')
        desc += f'-gamma{gamma:g}'
        args.loss_kwargs.r1_gamma = gamma

    if kimg is not None:
        assert isinstance(kimg, int)
        if not kimg >= 1:
            raise UserError('--kimg must be at least 1')
        desc += f'-kimg{kimg:d}'
        args.total_kimg = kimg

    if batch is not None:
        assert isinstance(batch, int)
        if not (batch >= 1 and batch % gpus == 0):
            raise UserError('--batch must be at least 1 and divisible by --gpus')
        desc += f'-batch{batch}'
        args.batch_size = batch
        args.batch_gpu = batch // gpus

    # ---------------------------------------------------
    # Discriminator augmentation: aug, p, target, augpipe
    # ---------------------------------------------------

    if aug is None:
        aug = 'ada'
    else:
        assert isinstance(aug, str)
        desc += f'-{aug}'

    if aug == 'ada':
        args.ada_target = 0.6
    elif aug == 'noaug':
        pass
    elif aug == 'fixed':
        if p is None:
            raise UserError(f'--aug={aug} requires specifying --p')
    else:
        raise UserError(f'--aug={aug} not supported')

    if p is not None:
        assert isinstance(p, float)
        if aug != 'fixed':
            raise UserError('--p can only be specified with --aug=fixed')
        if not 0 <= p <= 1:
            raise UserError('--p must be between 0 and 1')
        desc += f'-p{p:g}'
        args.augment_p = p

    if target is not None:
        assert isinstance(target, float)
        if aug != 'ada':
            raise UserError('--target can only be specified with --aug=ada')
        if not 0 <= target <= 1:
            raise UserError('--target must be between 0 and 1')
        desc += f'-target{target:g}'
        args.ada_target = target

    assert augpipe is None or isinstance(augpipe, str)
    if augpipe is None:
        augpipe = 'bgc'
    else:
        if aug == 'noaug':
            raise UserError('--augpipe cannot be specified with --aug=noaug')
        desc += f'-{augpipe}'

    augpipe_specs = {
        'blit':   dict(xflip=1, rotate90=1, xint=1),
        'geom':   dict(scale=1, rotate=1, aniso=1, xfrac=1),
        'color':  dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
        'filter': dict(imgfilter=1),
        'noise':  dict(noise=1),
        'cutout': dict(cutout=1),
        'bg':     dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),
        'bgc':    dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
        'bgcf':   dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),
        'bgcfn':  dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),
        'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),
    }

    assert augpipe in augpipe_specs
    if aug != 'noaug':
        args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **augpipe_specs[augpipe])

    # ----------------------------------
    # Transfer learning: resume, freezed
    # ----------------------------------

    resume_specs = {
        'ffhq256':     'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',
        'ffhq512':     'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',
        'ffhq1024':    'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',
        'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',
        'lsundog256':  'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',
    }

    assert resume is None or isinstance(resume, str)
    if resume is None:
        resume = 'noresume'
    elif resume == 'noresume':
        desc += '-noresume'
    elif resume in resume_specs:
        desc += f'-resume{resume}'
        args.resume_pkl = resume_specs[resume] # predefined url
    else:
        desc += '-resumecustom'
        args.resume_pkl = resume # custom path or url

    if resume != 'noresume':
        args.ada_kimg = 100 # make ADA react faster at the beginning
        args.ema_rampup = None # disable EMA rampup

    if freezed is not None:
        assert isinstance(freezed, int)
        if not freezed >= 0:
            raise UserError('--freezed must be non-negative')
        desc += f'-freezed{freezed:d}'
        args.D_kwargs.block_kwargs.freeze_layers = freezed

    # -------------------------------------------------
    # Performance options: fp32, nhwc, nobench, workers
    # -------------------------------------------------

    if fp32 is None:
        fp32 = False
    assert isinstance(fp32, bool)
    if fp32:
        args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0
        args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None
        # Bug fix: D_match_kwargs only exists for --transfer=res_block_match_dis;
        # touching it unconditionally raised AttributeError for every other mode.
        if 'D_match_kwargs' in args:
            args.D_match_kwargs.num_fp16_res = 0
            args.D_match_kwargs.conv_clamp = None

    if nhwc is None:
        nhwc = False
    assert isinstance(nhwc, bool)
    if nhwc:
        args.G_kwargs.synthesis_kwargs.fp16_channels_last = args.D_kwargs.block_kwargs.fp16_channels_last = True
        if 'D_match_kwargs' in args: # see fp32 note above
            args.D_match_kwargs.block_kwargs.fp16_channels_last = True

    if nobench is None:
        nobench = False
    assert isinstance(nobench, bool)
    if nobench:
        args.cudnn_benchmark = False

    if allow_tf32 is None:
        allow_tf32 = False
    assert isinstance(allow_tf32, bool)
    if allow_tf32:
        args.allow_tf32 = True

    if workers is not None:
        assert isinstance(workers, int)
        if not workers >= 1:
            raise UserError('--workers must be at least 1')
        args.data_loader_kwargs.num_workers = workers

    # -----------
    # DFMGAN args
    # -----------

    if ft is None or resume == 'noresume':
        args.ft = 'default'
    else:
        args.ft = ft

    if transfer is None:
        transfer = 'none'
    elif transfer != 'none':
        args.ft = 'transfer' # transfer mode overrides the finetune mode
    args.G_kwargs.transfer = transfer
    args.loss_kwargs.transfer = transfer
    if transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
        if res_st is None:
            args.G_kwargs.synthesis_kwargs.res_st = 64
        else:
            args.G_kwargs.synthesis_kwargs.res_st = int(res_st)
    if transfer == 'res_block_uni_dis':
        if uni_st is None:
            args.D_kwargs.uni_st = 64
        else:
            args.D_kwargs.uni_st = int(uni_st)

    if mask_threshold is None:
        args.G_kwargs.mask_threshold = 0.0
    else:
        args.G_kwargs.mask_threshold = float(mask_threshold)

    if lambda_match is None:
        args.loss_kwargs.lambda_match = 1.0
    else:
        args.loss_kwargs.lambda_match = float(lambda_match)

    args.loss_kwargs.mode_seek = 'none' if (transfer == 'none' or mode_seek is None) else mode_seek
    args.loss_kwargs.lambda_ms = 0.1 if lambda_ms is None else float(lambda_ms)

    if no_round is None:
        no_round = False
    assert isinstance(no_round, bool)
    args.G_kwargs.synthesis_kwargs.no_round = no_round

    if tanh_mask is None:
        tanh_mask = 'none'
    if tanh_k is None:
        tanh_k = 1.0
    args.G_kwargs.synthesis_kwargs.tanh_mask = args.loss_kwargs.tanh_mask = tanh_mask
    args.G_kwargs.synthesis_kwargs.tanh_k = args.loss_kwargs.tanh_k = tanh_k

    return desc, args
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
    """Per-process entry point: set up logging and distributed state, then run training."""
    dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)

    multi_gpu = args.num_gpus > 1

    # Bring up torch.distributed when more than one process participates.
    if multi_gpu:
        shared_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if os.name == 'nt': # Windows: gloo backend, file:/// URL with forward slashes.
            backend, url = 'gloo', 'file:///' + shared_file.replace('\\', '/')
        else:
            backend, url = 'nccl', f'file://{shared_file}'
        torch.distributed.init_process_group(backend=backend, init_method=url, rank=rank, world_size=args.num_gpus)

    # Configure torch_utils helpers.
    training_stats.init_multiprocessing(rank=rank, sync_device=torch.device('cuda', rank) if multi_gpu else None)
    if rank != 0:
        custom_ops.verbosity = 'none' # keep worker processes quiet

    # Run the actual training loop.
    training_loop.training_loop(rank=rank, **args)
#----------------------------------------------------------------------------
class CommaSeparatedList(click.ParamType):
    """Click parameter type that parses 'a,b,c' into ['a', 'b', 'c'] (empty/'none' -> [])."""
    name = 'list'

    def convert(self, value, param, ctx):
        del param, ctx # unused, but required by the click interface
        is_empty = value is None or value == '' or value.lower() == 'none'
        return [] if is_empty else value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context

# General options.
@click.option('--outdir', help='Where to save the results', required=True, metavar='DIR')
@click.option('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')
@click.option('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')
# Bug fix: help text previously advertised 'is5k', but the actual default
# (see setup_training_loop_kwargs) is clpips1k with --transfer, fid50k_full otherwise.
@click.option('--metrics', help='Comma-separated list or "none" [default: fid5k_full,kid5k_full,clpips1k with --transfer, else fid50k_full]', type=CommaSeparatedList())
@click.option('--seed', help='Random seed [default: 0]', type=int, metavar='INT')
@click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)

# Dataset.
@click.option('--data', help='Training data (directory or zip)', metavar='PATH', required=True)
@click.option('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL', is_flag = True)
@click.option('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')
@click.option('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')

# Base config.
@click.option('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar']))
@click.option('--gamma', help='Override R1 gamma', type=float)
@click.option('--kimg', help='Override training duration', type=int, metavar='INT')
@click.option('--batch', help='Override batch size', type=int, metavar='INT')

# Discriminator augmentation.
@click.option('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))
@click.option('--p', help='Augmentation probability for --aug=fixed', type=float)
@click.option('--target', help='ADA target value for --aug=ada', type=float)
@click.option('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc']))

# Transfer learning.
@click.option('--resume', help='Resume training [default: noresume]', metavar='PKL')
@click.option('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')

# Performance options.
@click.option('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')
@click.option('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')
@click.option('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')
@click.option('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')
@click.option('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')

# DFMGAN args
@click.option('--ft', help='Finetune mode [default: default]',
                type=click.Choice(['default', 'ft_map', 'ft_syn', 'ft_syn_2', 'ft_map_syn_2']))
@click.option('--transfer', help='Extra network for transfer learning [default: none]', type=click.Choice(['none', 'dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']))
@click.option('--res-st', help='Starting resolution for ResBlock [default: 64]', type=click.Choice(['4', '8', '16', '32', '64', '128', '256']), metavar='INT')
@click.option('--uni-st', help='Starting resolution for UnifiedBlock of Discriminator [default: 64]', type=click.Choice(['4', '8', '16', '32', '64', '128', '256']), metavar='INT')
@click.option('--mask-threshold', help='The threshold value between mask/non-mask regions [default: 0.0]', type=float)
@click.option('--lambda-match', help='Gmain_loss = loss_from_D + lambda * loss_from_D_match [default: 1.0]', type=float)
@click.option('--mode-seek', help='Method for mode seeking loss [default: w/mask]', default='w/mask', type=click.Choice(['none', 'w/mask', 'w/img', 'z/mask']))
# Bug fix: help text previously claimed [default: 1.0]; setup_training_loop_kwargs uses 0.1.
@click.option('--lambda-ms', help='loss_Gmain + lambda * loss_MS [default: 0.1]', type=float)
@click.option('--no-round', help='Use a soft mask if setting True [default: False]', type=bool, metavar='BOOL', is_flag = True)
@click.option('--tanh-k', help='mask = tanh(k * raw_mask) [default: 10.0]', default=10.0, type=float)
@click.option('--tanh-mask', help='Add tanh() to mask [default: late]', default='late', type=click.Choice(['none', 'late']))
@click.option('--dmatch-scale', help='D_match channel base / channel max [default: 4096/128]', default='4096/128', type=click.Choice(['16384/512', '8192/256', '4096/128']))
#@click.option('--transfer-cond', help='Enable multi-class defects [default: False]', type = bool, metavar = 'BOOL', is_flag = True)

def main(ctx, outdir, dry_run, **config_kwargs):
    """Train a GAN using the techniques described in the paper
    "Training Generative Adversarial Networks with Limited Data".

    Examples:

    \b
    # Train with custom dataset using 1 GPU.
    python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1

    \b
    # Train class-conditional CIFAR-10 using 2 GPUs.
    python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\
        --gpus=2 --cfg=cifar --cond=1

    \b
    # Transfer learn MetFaces from FFHQ using 4 GPUs.
    python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\
        --gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10

    \b
    # Reproduce original StyleGAN2 config F.
    python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\
        --gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug

    \b
    Base configs (--cfg):
      auto       Automatically select reasonable defaults based on resolution
                 and GPU count. Good starting point for new datasets.
      stylegan2  Reproduce results for StyleGAN2 config F at 1024x1024.
      paper256   Reproduce results for FFHQ and LSUN Cat at 256x256.
      paper512   Reproduce results for BreCaHAD and AFHQ at 512x512.
      paper1024  Reproduce results for MetFaces at 1024x1024.
      cifar      Reproduce results for CIFAR-10 at 32x32.

    \b
    Transfer learning source networks (--resume):
      ffhq256        FFHQ trained at 256x256 resolution.
      ffhq512        FFHQ trained at 512x512 resolution.
      ffhq1024       FFHQ trained at 1024x1024 resolution.
      celebahq256    CelebA-HQ trained at 256x256 resolution.
      lsundog256     LSUN Dog trained at 256x256 resolution.
      <PATH or URL>  Custom network pickle.
    """
    dnnlib.util.Logger(should_flush=True)

    # Setup training options.
    try:
        run_desc, args = setup_training_loop_kwargs(**config_kwargs)
    except UserError as err:
        ctx.fail(err)

    # Pick output directory: next free 5-digit run id under outdir.
    prev_run_dirs = []
    if os.path.isdir(outdir):
        prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
    prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
    prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
    cur_run_id = max(prev_run_ids, default=-1) + 1
    args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
    assert not os.path.exists(args.run_dir)

    # Print options.
    print()
    print('Training options:')
    print(json.dumps(args, indent=2))
    print()
    print(f'Output directory:   {args.run_dir}')
    print(f'Training data:      {args.training_set_kwargs.path}')
    print(f'Training duration:  {args.total_kimg} kimg')
    print(f'Number of GPUs:     {args.num_gpus}')
    print(f'Number of images:   {args.training_set_kwargs.max_size}')
    print(f'Image resolution:   {args.training_set_kwargs.resolution}')
    print(f'Conditional model:  {args.training_set_kwargs.use_labels}')
    print(f'Dataset x-flips:    {args.training_set_kwargs.xflip}')
    print()

    # Dry run?
    if dry_run:
        print('Dry run; exiting.')
        return

    # Create output directory.
    print('Creating output directory...')
    os.makedirs(args.run_dir)
    with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:
        json.dump(args, f, indent=2)

    # Launch processes.
    print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if args.num_gpus == 1:
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
# Script entry point: click parses sys.argv and supplies all option values.
if __name__ == "__main__":
    main() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 29,176 | 44.095827 | 192 | py |
DFMGAN | DFMGAN-main/calc_metrics.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Calculate quality metrics for previous training run or pretrained network pickle."""
import os
import click
import json
import tempfile
import copy
import torch
import dnnlib
import legacy
from metrics import metric_main
from metrics import metric_utils
from torch_utils import training_stats
from torch_utils import custom_ops
from torch_utils import misc
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
    """Per-process worker: init distributed state, build G, then evaluate each metric."""
    dnnlib.util.Logger(should_flush=True)
    chatty = (rank == 0 and args.verbose)

    # Bring up torch.distributed when running on several GPUs.
    if args.num_gpus > 1:
        shared_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if os.name == 'nt': # Windows: gloo backend, file:/// URL with forward slashes.
            backend, url = 'gloo', 'file:///' + shared_file.replace('\\', '/')
        else:
            backend, url = 'nccl', f'file://{shared_file}'
        torch.distributed.init_process_group(backend=backend, init_method=url, rank=rank, world_size=args.num_gpus)

    # Configure torch_utils helpers.
    training_stats.init_multiprocessing(rank=rank, sync_device=torch.device('cuda', rank) if args.num_gpus > 1 else None)
    if rank != 0 or not args.verbose:
        custom_ops.verbosity = 'none'

    # Load the generator (skipped for pure dataset-vs-dataset metric runs)
    # and optionally print a layer summary.
    G = None
    device = torch.device('cuda', rank)
    if not hasattr(args, 'dataset2_kwargs'):
        torch.backends.cudnn.benchmark = True
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
        G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
        if chatty:
            inputs = [torch.empty([1, G.z_dim], device=device),
                      torch.empty([1, G.c_dim], device=device)]
            if G.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                inputs.append(torch.empty([1, G.z_dim], device=device)) # defect latent
                if G.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                    inputs.append(True)
            misc.print_module_summary(G, inputs)

    # Evaluate every requested metric in turn.
    for metric in args.metrics:
        if chatty:
            print(f'Calculating {metric}...')
        monitor = metric_utils.ProgressMonitor(verbose=args.verbose)
        dataset2 = args.dataset2_kwargs if hasattr(args, 'dataset2_kwargs') else {}
        result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
            num_gpus=args.num_gpus, rank=rank, device=device, progress=monitor,
            dataset2_kwargs=dataset2, cache=args.cache)
        if rank == 0:
            metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
        if chatty:
            print()

    # Done.
    if chatty:
        print('Exiting...')
#----------------------------------------------------------------------------
class CommaSeparatedList(click.ParamType):
    """Click parameter type: 'a,b,c' -> ['a', 'b', 'c']; lists pass through; empty/'none' -> []."""
    name = 'list'

    def convert(self, value, param, ctx):
        del param, ctx # unused, but required by the click interface
        if isinstance(value, list):
            return value # already parsed (e.g. programmatic default)
        is_empty = value is None or value == '' or value.lower() == 'none'
        return [] if is_empty else value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH')
@click.option('--metrics', help='Comma-separated list or "none"', type=CommaSeparatedList(), default='fid5k_full,kid5k_full,clpips1k', show_default=True)
@click.option('--data', help='Dataset to evaluate metrics against (directory or zip) [default: same as training data]', metavar='PATH')
@click.option('--data2', help='Dataset2 to evaluate metrics against (directory or zip)', metavar='PATH')
@click.option('--mirror', help='Whether the dataset was augmented with x-flips during training [default: look up]', type=bool, metavar='BOOL')
@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
@click.option('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True)
@click.option('--cache', help='Use computed cache', type=bool, default=False, metavar='BOOL', show_default=True)
def calc_metrics(ctx, network_pkl, metrics, data, data2, mirror, gpus, verbose, cache):
    """Calculate quality metrics for previous training run or pretrained network pickle.
    Examples:
    \b
    # Previous training run: look up options automatically, save result to JSONL file.
    python calc_metrics.py --metrics=pr50k3_full \\
        --network=~/training-runs/00000-ffhq10k-res64-auto1/network-snapshot-000000.pkl
    \b
    # Pre-trained network pickle: specify dataset explicitly, print result to stdout.
    python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq.zip --mirror=1 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    Available metrics:
    \b
      ADA paper:
        fid50k_full  Frechet inception distance against the full dataset.
        kid50k_full  Kernel inception distance against the full dataset.
        pr50k3_full  Precision and recall againt the full dataset.
        is50k        Inception score for CIFAR-10.
    \b
      StyleGAN and StyleGAN2 papers:
        fid50k       Frechet inception distance against 50k real images.
        kid50k       Kernel inception distance against 50k real images.
        pr50k3       Precision and recall against 50k real images.
        ppl2_wend    Perceptual path length in W at path endpoints against full image.
        ppl_zfull    Perceptual path length in Z for full paths against cropped image.
        ppl_wfull    Perceptual path length in W for full paths against cropped image.
        ppl_zend     Perceptual path length in Z at path endpoints against cropped image.
        ppl_wend     Perceptual path length in W at path endpoints against cropped image.
    """
    # NOTE: the docstring above is the click --help text; edits to it change CLI output.
    dnnlib.util.Logger(should_flush=True)
    # Validate arguments.
    # Collect everything the per-rank workers need into a single pickleable EasyDict.
    args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose, cache=cache)
    if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
        ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
    if not args.num_gpus >= 1:
        ctx.fail('--gpus must be at least 1')
    # Load network.
    # Only attempted when --network is given; otherwise metrics run dataset-vs-dataset.
    if network_pkl is not None:
        if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
            ctx.fail('--network must point to a file or URL')
        if args.verbose:
            print(f'Loading network from "{network_pkl}"...')
        with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
            network_dict = legacy.load_network_pkl(f)
            args.G = network_dict['G_ema'] # subclass of torch.nn.Module
    # Initialize dataset options.
    # NOTE(review): if --network is omitted and --data is also omitted, the elif below
    # reads `network_dict`, which is then undefined (NameError) — confirm callers
    # always pass --data in that mode.
    if data is not None:
        args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
    elif network_dict['training_set_kwargs'] is not None:
        args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
    else:
        ctx.fail('Could not look up dataset options; please specify --data')
    if data2 is not None:
        args.dataset2_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data2)
    # Finalize dataset options.
    # With a network pickle, resolution/labels come from G; without one, a fixed
    # 256px unconditional setup is assumed for both datasets.
    # NOTE(review): the else branch touches args.dataset2_kwargs, which only exists
    # when --data2 was given — confirm --data2 is mandatory when --network is absent.
    if network_pkl is not None:
        args.dataset_kwargs.resolution = args.G.img_resolution
        args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
    else:
        args.dataset_kwargs.resolution = args.dataset2_kwargs.resolution = 256
        args.dataset_kwargs.use_labels = args.dataset2_kwargs.use_labels = False
    if mirror is not None:
        args.dataset_kwargs.xflip = mirror
    # Print dataset options.
    if args.verbose:
        print('Dataset options:')
        print(json.dumps(args.dataset_kwargs, indent=2))
    # Locate run dir.
    # If the pickle sits next to a training_options.json, treat that directory as the
    # run dir so report_metric appends to its JSONL log.
    args.run_dir = None
    if network_pkl is not None:
        args.run_dir = None  # NOTE(review): redundant re-assignment; harmless.
        if os.path.isfile(network_pkl):
            pkl_dir = os.path.dirname(network_pkl)
            if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
                args.run_dir = pkl_dir
    # Launch processes.
    # One process per GPU; single-GPU runs skip the spawn overhead entirely.
    if args.verbose:
        print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if args.num_gpus == 1:
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
    # click supplies all arguments from the command line.
    calc_metrics() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 9,992 | 43.413333 | 182 | py |
DFMGAN | DFMGAN-main/training/loss.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
class Loss:
    """Abstract base class for GAN training losses.

    Subclasses implement accumulate_gradients() to compute the loss for one
    training phase and backpropagate it.
    """

    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain): # to be overridden by subclass
        """Compute the loss for `phase` and accumulate gradients; subclasses must override."""
        raise NotImplementedError()
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
    """StyleGAN2 loss extended for defect-aware generation (DFMGAN).

    On top of the standard StyleGAN2 objectives (non-saturating logistic loss,
    R1 regularization, path-length regularization) this adds:
      * an optional defect mapping network G_defect_mapping whose w-codes are
        combined with the backbone w-codes via several 'transfer' modes,
      * an optional matching discriminator D_match scoring (image, mask) pairs,
      * an optional mode-seeking regularizer between latent codes and the
        generated masks/images.
    """
    def __init__(self, device, G_mapping, G_synthesis, D, lambda_match, lambda_ms, mode_seek, tanh_mask, tanh_k, D_match = None, augment_pipe=None, G_defect_mapping = None, style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2,
        transfer=None):
        """Store networks and hyperparameters.

        lambda_match weights the matching-discriminator term, lambda_ms the
        mode-seeking term. mode_seek selects the mode-seeking variant
        ('none', 'w/mask', 'w/img', 'z/mask'); tanh_mask/tanh_k control when/how
        the mask is squashed through tanh. transfer selects how defect w-codes
        enter synthesis ('none', 'dual_mod', 'res_block', 'res_block_match_dis',
        'res_block_uni_dis').
        """
        super().__init__()
        self.device = device
        self.G_mapping = G_mapping
        self.G_synthesis = G_synthesis
        self.D = D
        self.D_match = D_match
        if transfer == 'res_block_match_dis':
            assert self.D_match is not None
        self.augment_pipe = augment_pipe
        self.style_mixing_prob = style_mixing_prob
        self.r1_gamma = r1_gamma
        self.pl_batch_shrink = pl_batch_shrink
        self.pl_decay = pl_decay
        self.pl_weight = pl_weight
        self.pl_mean = torch.zeros([], device=device)  # running mean for PL regularization
        self.G_defect_mapping = G_defect_mapping
        self.transfer = transfer
        self.lambda_match = lambda_match
        self.lambda_ms = lambda_ms
        self.mode_seek = mode_seek
        self.tanh_mask = tanh_mask
        self.tanh_k = tanh_k
        self.phases_printed = False  # NOTE(review): set but never read in this class — confirm vestigial
    def run_G(self, z, c, sync, defect_z = None, transfer = 'none', output_mask = False, mode_seek = 'none'):
        """Map latents to w (with style mixing), optionally map defect latents, and synthesize.

        Returns (img, input_list), plus the mask and/or half-batch crossover
        outputs when `output_mask`/`mode_seek` request them. input_list holds
        the w-codes actually fed to synthesis (for path-length gradients).
        """
        with misc.ddp_sync(self.G_mapping, sync):
            ws = self.G_mapping(z, c)
            if self.style_mixing_prob > 0:
                with torch.autograd.profiler.record_function('style_mixing'):
                    cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                    cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
                    ws[:, cutoff:] = self.G_mapping(torch.randn_like(z), c, skip_w_avg_update=True)[:, cutoff:]
        if transfer != 'none':
            # Same style-mixing recipe, applied independently to the defect w-codes.
            with misc.ddp_sync(self.G_defect_mapping, sync):
                defect_ws = self.G_defect_mapping(defect_z, c)
                if self.style_mixing_prob > 0:
                    with torch.autograd.profiler.record_function('style_mixing'):
                        defect_cutoff = torch.empty([], dtype=torch.int64, device=defect_ws.device).random_(1, defect_ws.shape[1])
                        defect_cutoff = torch.where(torch.rand([], device=defect_ws.device) < self.style_mixing_prob, defect_cutoff, torch.full_like(defect_cutoff, defect_ws.shape[1]))
                        defect_ws[:, defect_cutoff:] = self.G_defect_mapping(torch.randn_like(defect_z), c, skip_w_avg_update=True)[:, defect_cutoff:]
        with misc.ddp_sync(self.G_synthesis, sync):
            input_list = [ws]
            if transfer == 'none':
                img = self.G_synthesis(ws)
            elif transfer == 'dual_mod':
                ws += defect_ws
                img = self.G_synthesis(ws)
            elif transfer == 'res_block':
                img = self.G_synthesis(ws, defect_ws)
                input_list.append(defect_ws)
            elif transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                if output_mask:
                    img, mask = self.G_synthesis(ws, defect_ws, output_mask = output_mask)
                else:
                    img = self.G_synthesis(ws, defect_ws, output_mask = output_mask)
                input_list.append(defect_ws)
                if mode_seek in ['w/mask', 'w/img', 'z/mask'] and output_mask:
                    # Crossover pass: first-half backbone codes with second-half
                    # defect codes, used by the mode-seeking regularizer.
                    half_batch = ws.shape[0] // 2
                    half_img, half_mask = self.G_synthesis(ws[:half_batch], defect_ws[half_batch:], output_mask = True)
        if transfer in ['res_block_match_dis', 'res_block_uni_dis'] and output_mask:
            if mode_seek in ['w/mask', 'z/mask']:
                return img, mask, half_mask, input_list
            elif mode_seek == 'w/img':
                return img, mask, half_img, input_list
            return img, mask, input_list
        else:
            return img, input_list
    def run_D(self, img, c, sync):
        """Score images with D, applying ADA augmentation if configured."""
        if self.augment_pipe is not None:
            img = self.augment_pipe(img)
        with misc.ddp_sync(self.D, sync):
            logits = self.D(img, c)
        return logits
    def run_D_uni(self, img, mask, c, sync):
        """Score (image, mask) with the unified discriminator; no ADA augmentation."""
        #if self.augment_pipe is not None:
        #    img = self.augment_pipe(img)
        with misc.ddp_sync(self.D, sync):
            logits = self.D(img, mask, c)
        return logits
    def run_D_match(self, img_mask, c, sync):
        """Score a channel-concatenated (image, mask) pair with D_match; no ADA augmentation."""
        #if self.augment_pipe is not None:
        #    img = self.augment_pipe(img)
        with misc.ddp_sync(self.D_match, sync):
            logits = self.D_match(img_mask, c)
        return logits
    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, gen_defect_z = None, real_mask = None, mask_threshold = 0.0):
        """Compute the loss for one training phase and call backward().

        Phases: Gmain/Greg/Gboth (generator), Dmain/Dreg/Dboth (discriminator),
        D_matchmain/D_matchreg/D_matchboth (matching discriminator). 'reg'
        phases apply path-length (G) or R1 (D/D_match) regularization.
        NOTE(review): mask_threshold is accepted but unused here — confirm callers.
        """
        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth', 'D_matchmain', 'D_matchreg', 'D_matchboth']
        do_Gmain = (phase in ['Gmain', 'Gboth'])
        do_Dmain = (phase in ['Dmain', 'Dboth'])
        do_D_matchmain = (phase in ['D_matchmain', 'D_matchboth'])
        do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
        do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
        do_D_matchr1 = (phase in ['D_matchreg', 'D_matchboth']) and (self.r1_gamma != 0)
        # print({
        #     'do_Gmain': do_Gmain,
        #     'do_Dmain': do_Dmain,
        #     'do_D_matchmain': do_D_matchmain,
        #     'do_Gpl': do_Gpl,
        #     'do_Dr1': do_Dr1,
        #     'do_D_matchr1': do_D_matchr1,
        # })
        # Gmain: Maximize logits for generated images.
        if do_Gmain:
            if self.mode_seek != 'none':
                assert gen_z.shape[0] % 2 == 0
            with torch.autograd.profiler.record_function('Gmain_forward'):
                if self.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                    if self.mode_seek == 'none':
                        gen_img, gen_mask, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer, output_mask = True) # May get synced by Gpl.
                    elif self.mode_seek in ['w/mask', 'z/mask']:
                        gen_img, gen_mask, gen_half_mask, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer, output_mask = True, mode_seek = self.mode_seek) # May get synced by Gpl.
                    elif self.mode_seek == 'w/img':
                        gen_img, gen_mask, gen_half_img, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer, output_mask = True, mode_seek = self.mode_seek) # May get synced by Gpl.
                else:
                    gen_img, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer) # May get synced by Gpl.
                if self.transfer == 'res_block_uni_dis':
                    gen_logits = self.run_D_uni(gen_img, gen_mask, gen_c, sync=False)
                else:
                    gen_logits = self.run_D(gen_img, gen_c, sync=False)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
                if self.transfer == 'res_block_match_dis':
                    # Extra generator term: fool the matching discriminator too.
                    if self.tanh_mask == 'late':
                        gen_mask = torch.tanh(self.tanh_k * gen_mask)
                        if self.mode_seek in ['w/mask', 'z/mask']:
                            gen_half_mask = torch.tanh(self.tanh_k * gen_half_mask)
                    gen_img_mask = torch.cat([gen_img, gen_mask], dim = 1)
                    gen_match_logits = self.run_D_match(gen_img_mask, gen_c, sync=False)
                    training_stats.report('Loss/scores/fake_match', gen_match_logits)
                    training_stats.report('Loss/signs/fake_match', gen_match_logits.sign())
                    loss_Gmain = loss_Gmain + self.lambda_match * torch.nn.functional.softplus(-gen_match_logits)
                if self.mode_seek in ['w/mask', 'w/img', 'z/mask']:
                    # Mode seeking: ratio of latent distance to output distance
                    # between the two batch halves (encourages output diversity).
                    assert len(inputs) == 2
                    assert gen_z.shape[0] % 2 == 0
                    half_batch_size = gen_z.shape[0] // 2
                    if self.mode_seek in ['w/mask', 'w/img']:
                        w = inputs[1]
                        w1, w2 = w[:half_batch_size], w[half_batch_size:]
                        if self.mode_seek == 'w/mask':
                            mask1, mask2 = gen_mask[:half_batch_size], gen_half_mask
                            loss_MS = (w1 - w2).abs().mean() / (mask1 - mask2).abs().mean()
                        elif self.mode_seek == 'w/img':
                            img1, img2 = gen_img[:half_batch_size], gen_half_img
                            loss_MS = (w1 - w2).abs().mean() / (img1 - img2).abs().mean()
                    elif self.mode_seek == 'z/mask':
                        z1, z2 = gen_defect_z[:half_batch_size], gen_defect_z[half_batch_size:]
                        mask1, mask2 = gen_mask[:half_batch_size], gen_half_mask
                        loss_MS = (z1 - z2).abs().mean() / (mask1 - mask2).abs().mean()
                    training_stats.report('Loss/mode_seek', loss_MS)
                training_stats.report('Loss/G/loss', loss_Gmain)
            with torch.autograd.profiler.record_function('Gmain_backward'):
                (loss_Gmain if self.mode_seek == 'none' else loss_Gmain + self.lambda_ms * loss_MS).mean().mul(gain).backward()
        # Gpl: Apply path length regularization.
        if do_Gpl:
            with torch.autograd.profiler.record_function('Gpl_forward'):
                batch_size = gen_z.shape[0] // self.pl_batch_shrink
                gen_img, input_list = self.run_G(gen_z[:batch_size], gen_c[:batch_size], sync=sync, defect_z = gen_defect_z[:batch_size] if gen_defect_z is not None else None, transfer = self.transfer)
                pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
                with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
                    # NOTE(review): [0] keeps only the gradient w.r.t. the first
                    # entry (ws) even when defect_ws is also in input_list —
                    # confirm this is intentional.
                    pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=input_list, create_graph=True, only_inputs=True)[0]
                pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
                pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
                self.pl_mean.copy_(pl_mean.detach())
                pl_penalty = (pl_lengths - pl_mean).square()
                training_stats.report('Loss/pl_penalty', pl_penalty)
                loss_Gpl = pl_penalty * self.pl_weight
                training_stats.report('Loss/G/reg', loss_Gpl)
            with torch.autograd.profiler.record_function('Gpl_backward'):
                # The 0-weighted img term keeps the graph alive for DDP bucketing.
                (gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()
        # Dmain: Minimize logits for generated images.
        loss_Dgen = 0
        if do_Dmain:
            with torch.autograd.profiler.record_function('Dgen_forward'):
                if self.transfer == 'res_block_uni_dis':
                    gen_img, gen_mask, _ = self.run_G(gen_z, gen_c, sync=False, defect_z = gen_defect_z, transfer = self.transfer, output_mask = True)
                    gen_logits = self.run_D_uni(gen_img, gen_mask, gen_c, sync=False)
                else:
                    gen_img, _ = self.run_G(gen_z, gen_c, sync=False, defect_z = gen_defect_z, transfer = self.transfer)
                    gen_logits = self.run_D(gen_img, gen_c, sync=False) # Gets synced by loss_Dreal.
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('Dgen_backward'):
                loss_Dgen.mean().mul(gain).backward()
        # Dmain: Maximize logits for real images.
        # Dr1: Apply R1 regularization.
        if do_Dmain or do_Dr1:
            name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
            with torch.autograd.profiler.record_function(name + '_forward'):
                real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
                if self.transfer == 'res_block_uni_dis':
                    real_mask_tmp = real_mask.detach().requires_grad_(do_Dr1)
                    real_logits = self.run_D_uni(real_img_tmp, real_mask_tmp, real_c, sync=sync)
                else:
                    real_logits = self.run_D(real_img_tmp, real_c, sync=sync)
                training_stats.report('Loss/scores/real', real_logits)
                training_stats.report('Loss/signs/real', real_logits.sign())
                loss_Dreal = 0
                if do_Dmain:
                    loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
                loss_Dr1 = 0
                if do_Dr1:
                    with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty', r1_penalty)
                    training_stats.report('Loss/D/reg', loss_Dr1)
            with torch.autograd.profiler.record_function(name + '_backward'):
                (real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()
        # D_matchmain: Minimize matching logits for generated images&masks.
        loss_D_matchgen = 0
        if do_D_matchmain:
            with torch.autograd.profiler.record_function('D_matchgen_forward'):
                gen_img, gen_mask, _ = self.run_G(gen_z, gen_c, sync=False, defect_z = gen_defect_z, transfer = self.transfer, output_mask = True)
                if self.tanh_mask == 'late':
                    gen_mask = torch.tanh(self.tanh_k * gen_mask)
                gen_img_mask = torch.cat([gen_img, gen_mask], dim = 1)
                gen_logits = self.run_D_match(gen_img_mask, gen_c, sync=False) # Gets synced by loss_Dreal.
                training_stats.report('Loss/scores/fake_match', gen_logits)
                training_stats.report('Loss/signs/fake_match', gen_logits.sign())
                loss_D_matchgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('D_matchgen_backward'):
                loss_D_matchgen.mean().mul(gain).backward()
        # D_matchmain: Maximize matching logits for real images&masks.
        # D_matchr1: Apply R1 regularization.
        if do_D_matchmain or do_D_matchr1:
            name = 'D_matchreal_Dr1' if do_D_matchmain and do_D_matchr1 else 'D_matchreal' if do_D_matchmain else 'D_matchr1'
            with torch.autograd.profiler.record_function(name + '_forward_match'):
                real_img_tmp = real_img.detach().requires_grad_(do_D_matchr1)
                real_mask_tmp = real_mask.detach().requires_grad_(do_D_matchr1)
                real_img_mask_tmp = torch.cat([real_img_tmp, real_mask_tmp], dim = 1)
                real_logits = self.run_D_match(real_img_mask_tmp, real_c, sync=sync)
                training_stats.report('Loss/scores/real_match', real_logits)
                training_stats.report('Loss/signs/real_match', real_logits.sign())
                loss_D_matchreal = 0
                if do_D_matchmain:
                    loss_D_matchreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D_match/loss', loss_D_matchgen + loss_D_matchreal)
                loss_D_matchr1 = 0
                if do_D_matchr1:
                    with torch.autograd.profiler.record_function('r1_grads_match'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_mask_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_D_matchr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty_match', r1_penalty)
                    training_stats.report('Loss/D_match/reg', loss_D_matchr1)
            with torch.autograd.profiler.record_function(name + '_backward_match'):
                (real_logits * 0 + loss_D_matchreal + loss_D_matchr1).mean().mul(gain).backward()
#----------------------------------------------------------------------------
| 18,203 | 56.974522 | 266 | py |
DFMGAN | DFMGAN-main/training/augment.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
# Coefficients of various wavelet decomposition low-pass filters.
# NOTE(review): the coefficients appear to be the analysis (decomposition)
# low-pass filters of the Daubechies/Symlet families — confirm against a
# wavelet reference before editing; only 'sym2' and 'sym6' are read below.
wavelets = {
    'haar': [0.7071067811865476, 0.7071067811865476],
    'db1': [0.7071067811865476, 0.7071067811865476],
    'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
    'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
    'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
    'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
    'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
    'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
    'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
    'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
    'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
    'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
}
#----------------------------------------------------------------------------
# Helpers for constructing transformation matrices.
def matrix(*rows, device=None):
    """Assemble a (possibly batched) transformation matrix from per-row entries.

    Entries may be Python scalars or tensors. If no entry is a tensor, a
    constant matrix is returned; otherwise scalar entries are broadcast to the
    batch shape of the first tensor entry and all entries are stacked.
    """
    width = len(rows[0])
    assert all(len(row) == width for row in rows)
    flat = [entry for row in rows for entry in row]
    tensors = [entry for entry in flat if isinstance(entry, torch.Tensor)]
    if not tensors:
        return misc.constant(np.asarray(rows), device=device)
    lead = tensors[0]
    assert device is None or device == lead.device
    flat = [entry if isinstance(entry, torch.Tensor) else misc.constant(entry, shape=lead.shape, device=lead.device) for entry in flat]
    return torch.stack(flat, dim=-1).reshape(lead.shape + (len(rows), -1))
def translate2d(tx, ty, **kwargs):
    """Homogeneous 2D translation matrix for offset (tx, ty)."""
    rows = (
        [1, 0, tx],
        [0, 1, ty],
        [0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def translate3d(tx, ty, tz, **kwargs):
    """Homogeneous 3D translation matrix for offset (tx, ty, tz)."""
    rows = (
        [1, 0, 0, tx],
        [0, 1, 0, ty],
        [0, 0, 1, tz],
        [0, 0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def scale2d(sx, sy, **kwargs):
    """Homogeneous 2D scaling matrix with factors (sx, sy)."""
    rows = (
        [sx, 0, 0],
        [0, sy, 0],
        [0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def scale3d(sx, sy, sz, **kwargs):
    """Homogeneous 3D scaling matrix with factors (sx, sy, sz)."""
    rows = (
        [sx, 0, 0, 0],
        [0, sy, 0, 0],
        [0, 0, sz, 0],
        [0, 0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def rotate2d(theta, **kwargs):
    """Homogeneous 2D rotation matrix for angle theta (radians)."""
    cos_t = torch.cos(theta)
    sin_t = torch.sin(theta)
    sin_neg = torch.sin(-theta)
    return matrix(
        [cos_t, sin_neg, 0],
        [sin_t, cos_t, 0],
        [0, 0, 1],
        **kwargs)
def rotate3d(v, theta, **kwargs):
    """Homogeneous 3D rotation about axis v by angle theta (Rodrigues form)."""
    vx, vy, vz = v[..., 0], v[..., 1], v[..., 2]
    s = torch.sin(theta)
    c = torch.cos(theta)
    cc = 1 - c
    return matrix(
        [vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0],
        [vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0],
        [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0],
        [0, 0, 0, 1],
        **kwargs)
def translate2d_inv(tx, ty, **kwargs):
    """Inverse of translate2d: translate by the negated offsets."""
    return translate2d(*(-tx, -ty), **kwargs)
def scale2d_inv(sx, sy, **kwargs):
    """Inverse of scale2d: scale by the reciprocal factors."""
    return scale2d(*(1 / sx, 1 / sy), **kwargs)
def rotate2d_inv(theta, **kwargs):
    """Inverse of rotate2d: rotate by -theta."""
    return rotate2d(-theta, **kwargs)
#----------------------------------------------------------------------------
# Versatile image augmentation pipeline from the paper
# "Training Generative Adversarial Networks with Limited Data".
#
# All augmentations are disabled by default; individual augmentations can
# be enabled by setting their probability multipliers to 1.
@persistence.persistent_class
class AugmentPipe(torch.nn.Module):
    def __init__(self,
        xflip=0, rotate90=0, xint=0, xint_max=0.125,
        scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
        brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
        imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1,
        noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
    ):
        """Store per-augmentation probability multipliers / strengths and
        precompute the wavelet filters used for geometric and frequency-band
        augmentations. All multipliers default to 0 (disabled); the overall
        probability buffer `p` is adjusted externally (e.g. by ADA).

        NOTE(review): imgfilter_bands uses a mutable default list; it is only
        read (copied via list()) below, so instances do not share state.
        """
        super().__init__()
        self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability.
        # Pixel blitting.
        self.xflip = float(xflip) # Probability multiplier for x-flip.
        self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations.
        self.xint = float(xint) # Probability multiplier for integer translation.
        self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions.
        # General geometric transformations.
        self.scale = float(scale) # Probability multiplier for isotropic scaling.
        self.rotate = float(rotate) # Probability multiplier for arbitrary rotation.
        self.aniso = float(aniso) # Probability multiplier for anisotropic scaling.
        self.xfrac = float(xfrac) # Probability multiplier for fractional translation.
        self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling.
        self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle.
        self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling.
        self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions.
        # Color transformations.
        self.brightness = float(brightness) # Probability multiplier for brightness.
        self.contrast = float(contrast) # Probability multiplier for contrast.
        self.lumaflip = float(lumaflip) # Probability multiplier for luma flip.
        self.hue = float(hue) # Probability multiplier for hue rotation.
        self.saturation = float(saturation) # Probability multiplier for saturation.
        self.brightness_std = float(brightness_std) # Standard deviation of brightness.
        self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast.
        self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle.
        self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation.
        # Image-space filtering.
        self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering.
        self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands.
        self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification.
        # Image-space corruptions.
        self.noise = float(noise) # Probability multiplier for additive RGB noise.
        self.cutout = float(cutout) # Probability multiplier for cutout.
        self.noise_std = float(noise_std) # Standard deviation of additive RGB noise.
        self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions.
        # Setup orthogonal lowpass filter for geometric augmentations.
        self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))
        # Construct filter bank for image-space filtering.
        # Each successive band is built by upsampling the previous low-pass and
        # adding the high-pass response, yielding one row per frequency band.
        Hz_lo = np.asarray(wavelets['sym2']) # H(z)
        Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z)
        Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2
        Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2
        Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i)
        for i in range(1, Hz_fbank.shape[0]):
            Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1]
            Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
            Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
        self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32))
def forward(self, images, debug_percentile=None):
    """Apply the randomized augmentation pipeline to a batch of images.

    images: float tensor of shape [batch, channels, height, width] (asserted below).
    debug_percentile: optional scalar in [0, 1]; when given, every random draw
        is replaced by that fixed percentile so the output is deterministic.
    Returns the augmented images (same batch/channel count; spatial size preserved).
    """
    assert isinstance(images, torch.Tensor) and images.ndim == 4
    batch_size, num_channels, height, width = images.shape
    device = images.device
    if debug_percentile is not None:
        debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device)

    # -------------------------------------
    # Select parameters for pixel blitting.
    # -------------------------------------

    # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
    I_3 = torch.eye(3, device=device)
    G_inv = I_3

    # Apply x-flip with probability (xflip * strength).
    if self.xflip > 0:
        i = torch.floor(torch.rand([batch_size], device=device) * 2)
        i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 2))
        G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)

    # Apply 90 degree rotations with probability (rotate90 * strength).
    if self.rotate90 > 0:
        i = torch.floor(torch.rand([batch_size], device=device) * 4)
        i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 4))
        G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)

    # Apply integer translation with probability (xint * strength).
    if self.xint > 0:
        t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max
        t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
        if debug_percentile is not None:
            t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max)
        G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height))

    # --------------------------------------------------------
    # Select parameters for general geometric transformations.
    # --------------------------------------------------------

    # Apply isotropic scaling with probability (scale * strength).
    if self.scale > 0:
        s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std)
        s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std))
        G_inv = G_inv @ scale2d_inv(s, s)

    # Apply pre-rotation with probability p_rot.
    # Rotation may fire before and/or after anisotropic scaling; p_rot is chosen
    # so the combined probability of at least one rotation equals (rotate * p).
    p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p
    if self.rotate > 0:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
        theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
        G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling.

    # Apply anisotropic scaling with probability (aniso * strength).
    if self.aniso > 0:
        s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std)
        s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std))
        G_inv = G_inv @ scale2d_inv(s, 1 / s)

    # Apply post-rotation with probability p_rot.
    if self.rotate > 0:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
        theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.zeros_like(theta)
        G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling.

    # Apply fractional translation with probability (xfrac * strength).
    if self.xfrac > 0:
        t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
        t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
        if debug_percentile is not None:
            t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std)
        G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height)

    # ----------------------------------
    # Execute geometric transformations.
    # ----------------------------------

    # Execute if the transform is not identity.
    if G_inv is not I_3:
        # Calculate padding: transform the image corners and take the extremes.
        cx = (width - 1) / 2
        cy = (height - 1) / 2
        cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz]
        cp = G_inv @ cp.t() # [batch, xyz, idx]
        Hz_pad = self.Hz_geom.shape[0] // 4
        margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx]
        margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1]
        margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device)
        margin = margin.max(misc.constant([0, 0] * 2, device=device))
        margin = margin.min(misc.constant([width-1, height-1] * 2, device=device))
        mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)

        # Pad image and adjust origin.
        images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect')
        G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv

        # Upsample (orthogonal lowpass filter avoids aliasing during warping).
        images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
        G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
        G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)

        # Execute transformation.
        shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
        G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device)
        grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False)
        images = grid_sample_gradfix.grid_sample(images, grid)

        # Downsample and crop.
        images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)

    # --------------------------------------------
    # Select parameters for color transformations.
    # --------------------------------------------

    # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
    I_4 = torch.eye(4, device=device)
    C = I_4

    # Apply brightness with probability (brightness * strength).
    if self.brightness > 0:
        b = torch.randn([batch_size], device=device) * self.brightness_std
        b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
        if debug_percentile is not None:
            b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std)
        C = translate3d(b, b, b) @ C

    # Apply contrast with probability (contrast * strength).
    if self.contrast > 0:
        c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std)
        c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
        if debug_percentile is not None:
            c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std))
        C = scale3d(c, c, c) @ C

    # Apply luma flip with probability (lumaflip * strength).
    v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis.
    if self.lumaflip > 0:
        i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
        i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 2))
        C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection.

    # Apply hue rotation with probability (hue * strength).
    if self.hue > 0 and num_channels > 1:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max
        theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
        C = rotate3d(v, theta) @ C # Rotate around v.

    # Apply saturation with probability (saturation * strength).
    if self.saturation > 0 and num_channels > 1:
        s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std)
        s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std))
        C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C

    # ------------------------------
    # Execute color transformations.
    # ------------------------------

    # Execute if the transform is not identity.
    if C is not I_4:
        images = images.reshape([batch_size, num_channels, height * width])
        if num_channels == 3:
            images = C[:, :3, :3] @ images + C[:, :3, 3:]
        elif num_channels == 1:
            C = C[:, :3, :].mean(dim=1, keepdims=True)
            images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
        else:
            raise ValueError('Image must be RGB (3 channels) or L (1 channel)')
        images = images.reshape([batch_size, num_channels, height, width])

    # ----------------------
    # Image-space filtering.
    # ----------------------

    if self.imgfilter > 0:
        num_bands = self.Hz_fbank.shape[0]
        assert len(self.imgfilter_bands) == num_bands
        expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f).

        # Apply amplification for each band with probability (imgfilter * strength * band_strength).
        g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity).
        for i, band_strength in enumerate(self.imgfilter_bands):
            t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std)
            t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
            if debug_percentile is not None:
                t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
            t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector.
            t[:, i] = t_i # Replace i'th element.
            t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power.
            g = g * t # Accumulate into global gain.

        # Construct combined amplification filter.
        Hz_prime = g @ self.Hz_fbank # [batch, tap]
        Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap]
        Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap]

        # Apply filter as two separable grouped 1D convolutions (x then y).
        p = self.Hz_fbank.shape[1] // 2
        images = images.reshape([1, batch_size * num_channels, height, width])
        images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect')
        images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
        images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
        images = images.reshape([batch_size, num_channels, height, width])

    # ------------------------
    # Image-space corruptions.
    # ------------------------

    # Apply additive RGB noise with probability (noise * strength).
    if self.noise > 0:
        sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std
        sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
        if debug_percentile is not None:
            sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std)
        images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma

    # Apply cutout with probability (cutout * strength).
    if self.cutout > 0:
        size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device)
        size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
        center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
        if debug_percentile is not None:
            size = torch.full_like(size, self.cutout_size)
            center = torch.full_like(center, debug_percentile)
        coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
        coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1])
        mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2)
        mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2)
        mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
        images = images * mask

    return images
#----------------------------------------------------------------------------
| 26,373 | 60.050926 | 366 | py |
DFMGAN | DFMGAN-main/training/dataset.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import numpy as np
import zipfile
import PIL.Image
import json
import torch
import dnnlib
try:
import pyspng
except ImportError:
pyspng = None
#----------------------------------------------------------------------------
class Dataset(torch.utils.data.Dataset):
    """Abstract base class for labeled image datasets.

    Handles index subsetting (max_size), optional x-flip doubling, lazy label
    loading/validation, and one-hot conversion of integer labels. Subclasses
    implement _load_raw_image(), _load_raw_labels(), and optionally close().
    """
    def __init__(self,
        name,                   # Name of the dataset.
        raw_shape,              # Shape of the raw image data (NCHW).
        max_size    = None,     # Artificially limit the size of the dataset. None = no limit. Applied before xflip.
        use_labels  = False,    # Enable conditioning labels? False = label dimension is zero.
        xflip       = False,    # Artificially double the size of the dataset via x-flips. Applied after max_size.
        random_seed = 0,        # Random seed to use when applying max_size.
    ):
        self._name = name
        self._raw_shape = list(raw_shape)
        self._use_labels = use_labels
        self._raw_labels = None     # cache filled by _get_raw_labels()
        self._label_shape = None    # cache filled by the label_shape property

        # Apply max_size: keep a random (seeded) subset, re-sorted for locality.
        self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64)
        if max_size is not None and self._raw_idx.size > max_size:
            np.random.RandomState(random_seed).shuffle(self._raw_idx)
            self._raw_idx = np.sort(self._raw_idx[:max_size])

        # Apply xflip: duplicate each index, flagging the second copy for mirroring.
        self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8)
        if xflip:
            self._raw_idx = np.tile(self._raw_idx, 2)
            self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)])

    def _get_raw_labels(self):
        # Lazily load and validate the raw label array; falls back to an
        # empty float32 label per image when labels are absent/disabled.
        if self._raw_labels is not None:
            return self._raw_labels
        labels = self._load_raw_labels() if self._use_labels else None
        if labels is None:
            labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32)
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == self._raw_shape[0]
        assert labels.dtype in [np.float32, np.int64]
        if labels.dtype == np.int64:
            assert labels.ndim == 1
            assert np.all(labels >= 0)
        self._raw_labels = labels
        return labels

    def close(self): # to be overridden by subclass
        pass

    def _load_raw_image(self, raw_idx): # to be overridden by subclass
        raise NotImplementedError

    def _load_raw_labels(self): # to be overridden by subclass
        raise NotImplementedError

    def __getstate__(self):
        # Drop the label cache when pickling; it is re-loaded lazily.
        state = dict(self.__dict__)
        state['_raw_labels'] = None
        return state

    def __del__(self):
        try:
            self.close()
        except:
            pass

    def __len__(self):
        return self._raw_idx.size

    def __getitem__(self, idx):
        image = self._load_raw_image(self._raw_idx[idx])
        assert isinstance(image, np.ndarray)
        assert list(image.shape) == self.image_shape
        assert image.dtype == np.uint8
        if self._xflip[idx]:
            assert image.ndim == 3 # CHW
            image = image[:, :, ::-1] # mirror along the width axis
        return image.copy(), self.get_label(idx)

    def get_label(self, idx):
        label = self._get_raw_labels()[self._raw_idx[idx]]
        if label.dtype == np.int64:
            # Integer class index => one-hot float vector.
            onehot = np.zeros(self.label_shape, dtype=np.float32)
            onehot[label] = 1
            label = onehot
        return label.copy()

    def get_details(self, idx):
        d = dnnlib.EasyDict()
        d.raw_idx = int(self._raw_idx[idx])
        d.xflip = (int(self._xflip[idx]) != 0)
        d.raw_label = self._get_raw_labels()[d.raw_idx].copy()
        return d

    @property
    def name(self):
        return self._name

    @property
    def image_shape(self):
        return list(self._raw_shape[1:])

    @property
    def num_channels(self):
        assert len(self.image_shape) == 3 # CHW
        return min(self.image_shape[0], 3)

    @property
    def resolution(self):
        shape = self.image_shape
        assert len(shape) == 3 and shape[1] == shape[2] # square CHW
        return shape[1]

    @property
    def label_shape(self):
        if self._label_shape is None:
            raw_labels = self._get_raw_labels()
            if raw_labels.dtype == np.int64:
                self._label_shape = [int(np.max(raw_labels)) + 1]
            else:
                self._label_shape = raw_labels.shape[1:]
        return list(self._label_shape)

    @property
    def label_dim(self):
        shape = self.label_shape
        assert len(shape) == 1
        return shape[0]

    @property
    def has_labels(self):
        return any(dim != 0 for dim in self.label_shape)

    @property
    def has_onehot_labels(self):
        return self._get_raw_labels().dtype == np.int64
#----------------------------------------------------------------------------
class ImageFolderDataset(Dataset):
    """Dataset backed by a directory tree or a zip archive of image files.

    Accepts any PIL-recognized image extension plus raw '.npy' arrays.
    Labels are optionally read from a 'dataset.json' file inside the path.
    """
    def __init__(self,
        path,                   # Path to directory or zip.
        resolution      = None, # Ensure specific resolution, None = highest available.
        **super_kwargs,         # Additional arguments for the Dataset base class.
    ):
        self._path = path
        self._zipfile = None

        if os.path.isdir(self._path):
            self._type = 'dir'
            self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
        elif self._file_ext(self._path) == '.zip':
            self._type = 'zip'
            self._all_fnames = set(self._get_zipfile().namelist())
        else:
            raise IOError('Path must point to a directory or zip')

        PIL.Image.init()
        # Keep image files only; '.npy' is also allowed (loaded via np.load).
        self._image_fnames = sorted(fname for fname in self._all_fnames if ((self._file_ext(fname) in PIL.Image.EXTENSION) or self._file_ext(fname) == '.npy'))
        if len(self._image_fnames) == 0:
            raise IOError('No image files found in the specified path')

        name = os.path.splitext(os.path.basename(self._path))[0]
        # Probe the first image to determine the common raw shape (NCHW).
        raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
        if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
            raise IOError('Image files do not match the specified resolution')
        super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)

    @staticmethod
    def _file_ext(fname):
        return os.path.splitext(fname)[1].lower()

    def _get_zipfile(self):
        # Zip handle is opened lazily and cached on the instance.
        assert self._type == 'zip'
        if self._zipfile is None:
            self._zipfile = zipfile.ZipFile(self._path)
        return self._zipfile

    def _open_file(self, fname):
        if self._type == 'dir':
            return open(os.path.join(self._path, fname), 'rb')
        if self._type == 'zip':
            return self._get_zipfile().open(fname, 'r')
        return None

    def close(self):
        try:
            if self._zipfile is not None:
                self._zipfile.close()
        finally:
            self._zipfile = None

    def __getstate__(self):
        # An open zip handle is not picklable; drop it (re-opened lazily).
        return dict(super().__getstate__(), _zipfile=None)

    def _load_raw_image(self, raw_idx):
        fname = self._image_fnames[raw_idx]
        with self._open_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                # Fast PNG path when pyspng is available.
                image = pyspng.load(f.read())
            elif self._file_ext(fname) == '.npy':
                image = np.load(f)
            else:
                image = np.array(PIL.Image.open(f))
        if image.ndim == 2:
            image = image[:, :, np.newaxis] # HW => HWC
        image = image.transpose(2, 0, 1) # HWC => CHW
        return image

    def _load_raw_labels(self):
        fname = 'dataset.json'
        if fname not in self._all_fnames:
            return None
        with self._open_file(fname) as f:
            labels = json.load(f)['labels']
        if labels is None:
            return None
        labels = dict(labels)
        # Normalize path separators so zip/dir listings match the json keys.
        labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
        labels = np.array(labels)
        # 1D => integer class indices, 2D => float label vectors.
        labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
        return labels
#----------------------------------------------------------------------------
| 8,683 | 35.334728 | 159 | py |
DFMGAN | DFMGAN-main/training/networks.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#----------------------------------------------------------------------------
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    """Rescale x so its second moment along `dim` equals one (eps for stability)."""
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * (second_moment + eps).rsqrt()
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x,                          # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,                     # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,                     # Modulation coefficients of shape [batch_size, in_channels].
    noise           = None,     # Optional noise tensor to add to the output activations.
    up              = 1,        # Integer upsampling factor.
    down            = 1,        # Integer downsampling factor.
    padding         = 0,        # Padding with respect to the upsampled image.
    resample_filter = None,     # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate      = True,     # Apply weight demodulation?
    flip_weight     = True,     # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv   = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
):
    """StyleGAN2 modulated convolution: per-sample style modulation of the
    weights, optional demodulation, and optional additive noise, executed
    either as a fused grouped convolution or as activation scaling."""
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]

    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Fully connected layer with equalized learning rate and fused bias/activation."""
    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        bias            = True,     # Apply additive bias before the activation function?
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 1,        # Learning rate multiplier.
        bias_init       = 0,        # Initial value for the additive bias.
    ):
        super().__init__()
        self.activation = activation
        # Weights are stored pre-divided by lr_multiplier and rescaled at
        # runtime (equalized learning rate trick).
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
        self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        weight = self.weight.to(x.dtype) * self.weight_gain
        bias = self.bias
        if bias is not None:
            bias = bias.to(x.dtype)
            if self.bias_gain != 1:
                bias = bias * self.bias_gain
        if self.activation == 'linear' and bias is not None:
            # Fast path: fused multiply-add for the plain affine case.
            return torch.addmm(bias.unsqueeze(0), x, weight.t())
        out = x.matmul(weight.t())
        return bias_act.bias_act(out, bias, act=self.activation)
#----------------------------------------------------------------------------
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    """2D convolution layer with optional up/downsampling and fused bias + activation."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        kernel_size,                    # Width and height of the convolution kernel.
        bias            = True,         # Apply additive bias before the activation function?
        activation      = 'linear',     # Activation function: 'relu', 'lrelu', etc.
        up              = 1,            # Integer upsampling factor.
        down            = 1,            # Integer downsampling factor.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output to +-X, None = disable clamping.
        channels_last   = False,        # Expect the input to have memory_format=channels_last?
        trainable       = True,         # Update the weights of this layer during training?
    ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) # equalized-lr runtime scaling
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
        bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = torch.nn.Parameter(bias) if bias is not None else None
        else:
            # Non-trainable weights are buffers: excluded from the optimizer
            # but still moved/saved with the module.
            self.register_buffer('weight', weight)
            if bias is not None:
                self.register_buffer('bias', bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        b = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = (self.up == 1) # slightly faster
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """Maps latent z (and optional condition c) to intermediate latents w.

    Tracks a moving average of w during training (w_avg) to support
    truncation at inference time.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.995,    # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)

        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)

        # Update moving average of W (training only, unless explicitly skipped).
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast the single w to one copy per synthesis layer.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation: lerp towards w_avg, optionally only for the first
        # truncation_cutoff layers.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Single StyleGAN2 synthesis layer: modulated conv + noise + bias/activation."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size     = 3,            # Convolution kernel size.
        up              = 1,            # Integer upsampling factor.
        use_noise       = True,         # Enable noise input?
        activation      = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last   = False,        # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        # Affine mapping from w to per-channel modulation styles.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        if use_noise:
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        in_resolution = self.resolution // self.up
        misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution])
        styles = self.affine(w)

        noise = None
        if self.use_noise and noise_mode == 'random':
            noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
        if self.use_noise and noise_mode == 'const':
            noise = self.noise_const * self.noise_strength

        flip_weight = (self.up == 1) # slightly faster
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    """Style-modulated (non-demodulated) conv that maps features to image channels."""
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.conv_clamp = conv_clamp
        self.kernel_size = kernel_size
        # Affine mapping from w to per-input-channel modulation styles.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        init = torch.randn([out_channels, in_channels, kernel_size, kernel_size])
        self.weight = torch.nn.Parameter(init.to(memory_format=fmt))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        # Equalized learning-rate scaling (applied to the styles at runtime).
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
    def forward(self, x, w, fused_modconv=True):
        """Modulated conv (demodulate=False) followed by bias + optional clamp."""
        s = self.affine(w) * self.weight_gain
        pad = self.kernel_size // 2
        out = modulated_conv2d(x=x, weight=self.weight, styles=s, demodulate=False,
            fused_modconv=fused_modconv, padding=pad)
        return bias_act.bias_act(out, self.bias.to(out.dtype), clamp=self.conv_clamp)
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    """One resolution level of the synthesis network: up to two modulated conv
    layers plus an optional ToRGB head.

    Extended (vs. stock StyleGAN2) with a `torgb_type` switch so the same block
    can also emit or consume a single-channel mask:
      * 'rgb'           -- standard ToRGB producing `img_channels` channels.
      * 'gen_mask'      -- ToRGB producing a 1-channel mask; features are gated by it.
      * 'upsample_mask' -- no ToRGB; the incoming 1-channel `img` is upsampled
                           and used as the gating mask.
      * 'none'          -- plain feature block; forward returns only `x`.
    """
    def __init__(self,
        in_channels, # Number of input channels, 0 = first block.
        out_channels, # Number of output channels.
        w_dim, # Intermediate latent (W) dimensionality.
        resolution, # Resolution of this block.
        img_channels, # Number of output color channels.
        is_last, # Is this the last block?
        torgb_type, # 'none', 'rgb', 'gen_mask', 'upsample_mask'
        no_round,
        tanh_mask,
        tanh_k,
        img_resolution = None,
        mask_threshold = None,
        architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
        conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16 = False, # Use FP16 for this block?
        fp16_channels_last = False, # Use channels-last memory format with FP16?
        **layer_kwargs, # Arguments for SynthesisLayer.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_conv = 0
        self.num_torgb = 0
        self.torgb_type = torgb_type
        self.mask_threshold = mask_threshold
        self.no_round = no_round
        self.tanh_mask = tanh_mask
        self.tanh_k = tanh_k
        self.img_resolution = img_resolution
        # NOTE(review): upsample_pad appears unused in this class's forward -- confirm.
        self.upsample_pad = torch.nn.ReplicationPad2d(padding = 1)
        # First block (4x4) starts from a learned constant instead of an input.
        if in_channels == 0:
            self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
        if in_channels != 0:
            # conv0 doubles the resolution (up=2).
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
                resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
            self.num_conv += 1
        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
            conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
        self.num_conv += 1
        # ToRGB head is only built for 'rgb' (image) or 'gen_mask' (1-channel mask).
        if (is_last or architecture == 'skip') and (torgb_type in ['rgb', 'gen_mask']):
            if torgb_type == 'rgb':
                self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                    conv_clamp=conv_clamp, channels_last=self.channels_last)
            # All ToRGB
            elif torgb_type == 'gen_mask':
                self.torgb = ToRGBLayer(out_channels, 1, w_dim=w_dim,
                    conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1
        if in_channels != 0 and architecture == 'resnet':
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                resample_filter=resample_filter, channels_last=self.channels_last)
    def forward(self, x, img, ws, res_x = None, force_fp32=False, fused_modconv=None, **layer_kwargs):
        """Run the block.

        Args:
            x: Features from the previous block (None for the first block).
            img: Accumulated skip image/mask from the previous block, or None.
            ws: Per-block latent slice [N, num_conv + num_torgb, w_dim].
            res_x: Optional residual features added to x (same dtype/shape).
            force_fp32: Override FP16 and run this block in FP32.
            fused_modconv: None = auto-select based on training mode / batch size.
        Returns:
            x, or (x, img) depending on `torgb_type`.
        """
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            with misc.suppress_tracer_warnings(): # this value will be treated as a constant
                fused_modconv = (not self.training) and (dtype == torch.float32 or int(x.shape[0]) == 1)
        # Input.
        if self.in_channels == 0:
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            x = x.to(dtype=dtype, memory_format=memory_format)
        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        # ResBlock exists
        # Inject the residual branch's features (defect branch in SynthesisResNet).
        if res_x is not None:
            assert x.dtype == res_x.dtype and x.shape == res_x.shape
            x = x + res_x
            #x = torch.where(res_x == 0.0, x, res_x)
        # ToRGB.
        if img is not None:
            misc.assert_shape(img, [None, self.img_channels if self.torgb_type == 'rgb' else 1, self.resolution // 2, self.resolution // 2])
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if (self.is_last or self.architecture == 'skip') and (self.torgb_type in ['rgb', 'gen_mask']):
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            # Mask accumulations stay in fp16; the rgb skip image is kept in fp32.
            y = y.to(dtype=torch.float32 if self.torgb_type == 'rgb' else torch.float16, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y
            if self.torgb_type == 'gen_mask' and self.tanh_mask == 'early':
                img = torch.tanh(self.tanh_k * img)
        # Gate the features by the mask, either softly (no_round) or hard-thresholded.
        # NOTE(review): the soft path maps img via img/2+0.5, i.e. it assumes the mask
        # lives in roughly [-1, 1] -- confirm against mask_threshold's intended range.
        if self.torgb_type in ['gen_mask', 'upsample_mask']:
            assert(x.shape[0] == img.shape[0] and img.shape[1] == 1 and x.shape[2] == img.shape[2] and x.shape[3] == img.shape[3])
            if self.no_round:
                x = x * (img / 2.0 + 0.5)
            else:
                x = x * (img >= self.mask_threshold)
        assert x.dtype == dtype
        if self.torgb_type == 'none':
            return x
        assert img.dtype == (torch.float32 if self.torgb_type == 'rgb' else torch.float16)
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """Stack of SynthesisBlocks (4x4 up to img_resolution) mapping W latents to an image."""
    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 0,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        n_channels = {r: min(channel_base // r, channel_max) for r in self.block_resolutions}
        fp16_start = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        self.num_ws = 0
        for r in self.block_resolutions:
            final = (r == self.img_resolution)
            blk = SynthesisBlock(
                n_channels[r // 2] if r > 4 else 0, n_channels[r], w_dim=w_dim, resolution=r,
                img_channels=img_channels, is_last=final, use_fp16=(r >= fp16_start),
                torgb_type='rgb', **block_kwargs)
            self.num_ws += blk.num_conv
            if final:
                self.num_ws += blk.num_torgb
            setattr(self, f'b{r}', blk)
    def forward(self, ws, **block_kwargs):
        """Run every block in order, handing each its slice of `ws`; returns the image."""
        per_block_ws = []
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            cursor = 0
            for r in self.block_resolutions:
                blk = getattr(self, f'b{r}')
                # Each slice also covers the block's ToRGB w; the cursor only
                # advances by num_conv, so consecutive slices overlap by num_torgb.
                per_block_ws.append(ws.narrow(1, cursor, blk.num_conv + blk.num_torgb))
                cursor += blk.num_conv
        x = img = None
        for r, cur_ws in zip(self.block_resolutions, per_block_ws):
            x, img = getattr(self, f'b{r}')(x, img, cur_ws, **block_kwargs)
        return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisResNet(torch.nn.Module):
    """SynthesisNetwork plus a parallel residual ("defect") branch.

    For every resolution >= `res_st` a second SynthesisBlock is built: at
    `res_st` it generates a 1-channel mask ('gen_mask'), above it the mask is
    upsampled along ('upsample_mask'). The residual branch's features are added
    into the main branch via `res_x`.
    """
    def __init__(self,
        w_dim, # Intermediate latent (W) dimensionality.
        img_resolution, # Output image resolution.
        img_channels, # Number of color channels.
        res_st,
        mask_threshold = 0.0,
        channel_base = 32768, # Overall multiplier for the number of channels.
        channel_max = 512, # Maximum number of channels in any layer.
        num_fp16_res = 0, # Use FP16 for the N highest resolutions.
        **block_kwargs, # Arguments for SynthesisBlock.
    ):
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        self.res_st = res_st
        self.num_ws = 0
        self.num_defect_ws = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0
            out_channels = channels_dict[res]
            use_fp16 = (res >= fp16_resolution)
            is_last = (res == self.img_resolution)
            # Main (pretrained) branch block.
            block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, torgb_type = 'rgb', **block_kwargs)
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f'b{res}', block)
            # Residual (defect) branch block; only the first one generates the mask.
            if res >= self.res_st:
                res_block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                    img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, torgb_type = 'gen_mask' if res == self.res_st else 'upsample_mask', mask_threshold=mask_threshold, **block_kwargs)
                self.num_defect_ws += res_block.num_conv
                if res == self.res_st:
                    # One extra w for the mask-generating ToRGB head.
                    self.num_defect_ws += 1
                setattr(self, f'res_b{res}', res_block)
    def forward(self, ws, defect_ws, fix_residual_to_zero = False, output_mask = False, **block_kwargs):
        """Run both branches.

        Args:
            ws: Latents for the main branch [N, num_ws, w_dim].
            defect_ws: Latents for the residual branch [N, num_defect_ws, w_dim].
            fix_residual_to_zero: Skip the residual branch entirely.
            output_mask: Also return the generated defect mask.
        """
        # res_block_ws is pre-padded with None for resolutions below res_st so the
        # zip below stays aligned with block_resolutions.
        block_ws, res_block_ws = [], [None for _ in range(int(np.log2(self.res_st)) - 2)]
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            misc.assert_shape(defect_ws, [None, self.num_defect_ws, self.w_dim])
            ws = ws.to(torch.float32)
            defect_ws = defect_ws.to(torch.float32)
            w_idx = defect_w_idx = 0
            for res in self.block_resolutions:
                block = getattr(self, f'b{res}')
                block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
                w_idx += block.num_conv
                if res >= self.res_st:
                    res_block = getattr(self, f'res_b{res}')
                    res_block_ws.append(defect_ws.narrow(1, defect_w_idx, res_block.num_conv + res_block.num_torgb))
                    defect_w_idx += res_block.num_conv
        x = img = None
        for res, cur_ws, cur_res_ws in zip(self.block_resolutions, block_ws, res_block_ws):
            res_x = None
            if res >= self.res_st and not fix_residual_to_zero:
                res_block = getattr(self, f'res_b{res}')
                # `mask` is produced at res_st and threaded through higher resolutions.
                res_x, mask = res_block(x, None if res == self.res_st else mask, cur_res_ws, **block_kwargs)
            block = getattr(self, f'b{res}')
            x, img = block(x, img, cur_ws, res_x = res_x, **block_kwargs)
        # NOTE(review): if fix_residual_to_zero=True, `mask` is never assigned, so
        # output_mask=True would raise NameError here -- confirm callers never
        # combine these two flags.
        if output_mask:
            return img, mask
        else:
            return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """Top-level generator: mapping network(s) + synthesis network.

    `transfer` selects the architecture:
      * 'res_block', 'res_block_match_dis', 'res_block_uni_dis' -- SynthesisResNet
        with a second ("defect") mapping network feeding the residual branch; the
        main mapping is then unconditional (c_dim forced to 0).
      * 'dual_mod' -- plain SynthesisNetwork; defect ws are added onto ws.
      * anything else -- plain SynthesisNetwork driven by a single mapping.
    """
    def __init__(self,
        z_dim, # Input latent (Z) dimensionality.
        c_dim, # Conditioning label (C) dimensionality.
        w_dim, # Intermediate latent (W) dimensionality.
        img_resolution, # Output resolution.
        img_channels, # Number of output color channels.
        transfer,
        mask_threshold = 0.0,
        mapping_kwargs = {}, # Arguments for MappingNetwork.
        synthesis_kwargs = {}, # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.transfer = transfer
        self.mask_threshold = mask_threshold
        if self.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            self.synthesis = SynthesisResNet(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, mask_threshold=self.mask_threshold, **synthesis_kwargs)
        else:
            self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=0 if self.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis'] else c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
        # NOTE(review): for 'dual_mod' this reads synthesis.num_defect_ws, which only
        # SynthesisResNet defines in this file -- confirm SynthesisNetwork provides it
        # elsewhere, or that 'dual_mod' is unused.
        if self.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            self.num_defect_ws = self.synthesis.num_defect_ws
            self.defect_mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_defect_ws, **mapping_kwargs)
    def forward(self, z, c, defect_z=None, output_mask = False, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs):
        """Map z (and defect_z) to W and synthesize an image (optionally with its mask)."""
        ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
        if self.transfer == 'dual_mod':
            defect_ws = self.defect_mapping(defect_z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
            # In-place elementwise sum of the two W sequences.
            ws += defect_ws
        if self.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            defect_ws = self.defect_mapping(defect_z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
            if output_mask:
                img, mask = self.synthesis(ws, defect_ws, output_mask = output_mask, **synthesis_kwargs)
            else:
                img = self.synthesis(ws, defect_ws, **synthesis_kwargs)
        else:
            img = self.synthesis(ws, **synthesis_kwargs)
        # NOTE(review): output_mask=True with a non-res_block transfer leaves `mask`
        # unassigned (NameError) -- callers must only request masks in res_block modes.
        if output_mask:
            return img, mask
        else:
            return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
    """One downsampling stage of the discriminator (resolution -> resolution/2).

    `block_type` selects what the FromRGB layer ingests: 'rgb' reads
    `img_channels` channels, 'mask' reads a single channel; 'uni' blocks
    operate on already-fused features (their forward is called with img=None).
    """
    def __init__(self,
        in_channels, # Number of input channels, 0 = first block.
        tmp_channels, # Number of intermediate channels.
        out_channels, # Number of output channels.
        resolution, # Resolution of this block.
        img_channels, # Number of input color channels.
        first_layer_idx, # Index of the first layer.
        block_type = 'rgb', # 'rgb', 'mask', 'uni'
        architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
        conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16 = False, # Use FP16 for this block?
        fp16_channels_last = False, # Use channels-last memory format with FP16?
        freeze_layers = 0, # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        self.img_channels = img_channels
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.block_type = block_type
        self.num_layers = 0
        # Freeze-D bookkeeping: each next() yields whether the corresponding layer
        # (by global index) is trainable, and counts this block's layers as a side effect.
        def trainable_gen():
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = (layer_idx >= freeze_layers)
                self.num_layers += 1
                yield trainable
        trainable_iter = trainable_gen()
        if in_channels == 0 or architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels if self.block_type == 'rgb' else 1, tmp_channels, kernel_size=1, activation=activation,
                trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
            trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        # conv1 halves the resolution (down=2).
        self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
            trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
        if architecture == 'resnet':
            self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
                trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
    def forward(self, x, img, force_fp32=False):
        """Process one stage; returns (features, img-or-None for the next stage)."""
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        # Input.
        if x is not None:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
            x = x.to(dtype=dtype, memory_format=memory_format)
        # FromRGB.
        if self.in_channels == 0 or self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels if self.block_type == 'rgb' else 1, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            # 'skip' keeps downsampling the raw image for the next stage; otherwise it is consumed here.
            img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None
        # Main layers.
        if self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            x = self.conv1(x)
        assert x.dtype == dtype
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    """Appends per-group minibatch standard-deviation statistics as extra channels."""
    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size
        self.num_channels = num_channels
    def forward(self, x):
        N, C, H, W = x.shape
        with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
            if self.group_size is not None:
                G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N))
            else:
                G = N
        F = self.num_channels
        stat = x.reshape(G, -1, F, C // F, H, W)    # Split batch into groups of G, channels into F sets.
        stat = stat - stat.mean(dim=0)              # Center within each group.
        stat = stat.square().mean(dim=0)            # Variance over the group.
        stat = (stat + 1e-8).sqrt()                 # Standard deviation.
        stat = stat.mean(dim=[2,3,4])               # Average over channels and pixels -> [nF].
        stat = stat.reshape(-1, F, 1, 1)            # Restore the missing dimensions.
        stat = stat.repeat(G, 1, H, W)              # Broadcast back over group members and pixels.
        return torch.cat([x, stat], dim=1)          # Append as new channels.
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    """Final 4x4 discriminator stage: minibatch-stddev, conv, dense head, optional label projection."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size    = 4,        # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels  = 1,        # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation          = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture
        if architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
        self.mbstd = None
        if mbstd_num_channels > 0:
            self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels)
        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
        self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
        self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)
    def forward(self, x, img, cmap, force_fp32=False):
        """Score the batch; this stage always runs in FP32 regardless of force_fp32."""
        misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
        _ = force_fp32 # unused
        x = x.to(dtype=torch.float32, memory_format=torch.contiguous_format)
        if self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            x = x + self.fromrgb(img)
        # Minibatch-stddev, conv, then the dense head.
        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.conv(x)
        x = self.fc(x.flatten(1))
        x = self.out(x)
        # Project onto the mapped conditioning label, if conditional.
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
        assert x.dtype == torch.float32
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(torch.nn.Module):
    """Standard StyleGAN2 discriminator: a pyramid of DiscriminatorBlocks plus a 4x4 epilogue."""
    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 0,        # Use FP16 for the N highest resolutions.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Resolutions from the input size down to 8; the 4x4 stage is the epilogue.
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        n_channels = {r: min(channel_base // r, channel_max) for r in self.block_resolutions + [4]}
        fp16_start = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        if cmap_dim is None:
            cmap_dim = n_channels[4]
        if c_dim == 0:
            cmap_dim = 0
        shared = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        layer_idx = 0
        for r in self.block_resolutions:
            blk = DiscriminatorBlock(
                n_channels[r] if r < img_resolution else 0,  # Highest-res block has no feature input.
                n_channels[r], n_channels[r // 2], resolution=r,
                first_layer_idx=layer_idx, use_fp16=(r >= fp16_start), **block_kwargs, **shared)
            setattr(self, f'b{r}', blk)
            layer_idx += blk.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(n_channels[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **shared)
    def forward(self, img, c, **block_kwargs):
        """Score images; `c` is used only when the discriminator is conditional."""
        x = None
        for r in self.block_resolutions:
            x, img = getattr(self, f'b{r}')(x, img, **block_kwargs)
        cmap = self.mapping(None, c) if self.c_dim > 0 else None
        return self.b4(x, img, cmap)
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorUnified(torch.nn.Module):
    """Discriminator with two parallel pyramids (image + 1-channel mask) that are
    fused into a single stream at resolution `uni_st`.

    Above `uni_st`: an 'rgb' block and a 'mask' block run side by side.
    At `uni_st`: their features are concatenated and processed by 'uni' blocks,
    which take no raw image input, down to the `uni_b4` epilogue.
    """
    def __init__(self,
        c_dim, # Conditioning label (C) dimensionality.
        img_resolution, # Input resolution.
        img_channels, # Number of input color channels.
        uni_st,
        architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base = 32768, # Overall multiplier for the number of channels.
        channel_max = 512, # Maximum number of channels in any layer.
        num_fp16_res = 0, # Use FP16 for the N highest resolutions.
        conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim = None, # Dimensionality of mapped conditioning label, None = default.
        block_kwargs = {}, # Arguments for DiscriminatorBlock.
        mapping_kwargs = {}, # Arguments for MappingNetwork.
        epilogue_kwargs = {}, # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        self.uni_st = uni_st
        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0
        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            if res > self.uni_st:
                # Parallel streams: one block for the image, one for the mask.
                block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                    first_layer_idx=cur_layer_idx, use_fp16=use_fp16, block_type = 'rgb', **block_kwargs, **common_kwargs)
                setattr(self, f'b{res}', block)
                # NOTE(review): mask_block shares first_layer_idx with the rgb block and
                # its layers are not counted into cur_layer_idx -- confirm this is the
                # intended accounting for freeze_layers.
                mask_block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                    first_layer_idx=cur_layer_idx, use_fp16=use_fp16, block_type = 'mask', **block_kwargs, **common_kwargs)
                setattr(self, f'mask_b{res}', mask_block)
            else:
                # Fused stream; at exactly uni_st the channel counts double because the
                # two streams' features are concatenated.
                block = DiscriminatorBlock((in_channels * 2) if res == self.uni_st else in_channels, (tmp_channels * 2) if res == self.uni_st else tmp_channels, out_channels, resolution=res,
                    first_layer_idx=cur_layer_idx, use_fp16=use_fp16, block_type = 'uni', **block_kwargs, **common_kwargs)
                setattr(self, f'uni_b{res}', block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.uni_b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
    def forward(self, img, mask, c, **block_kwargs):
        """Score an (image, mask) pair.

        NOTE(review): assumes `uni_st` is one of block_resolutions (>= 8); otherwise
        `x_uni` is never assigned before use.
        """
        x = None
        x_mask = None
        for res in self.block_resolutions:
            if res > self.uni_st:
                block = getattr(self, f'b{res}')
                x, img = block(x, img, **block_kwargs)
                mask_block = getattr(self, f'mask_b{res}')
                x_mask, mask = mask_block(x_mask, mask, **block_kwargs)
            else:
                if res == self.uni_st:
                    # Fuse the two streams by channel concatenation.
                    x_uni = torch.cat([x, x_mask], dim = 1)
                uni_block = getattr(self, f'uni_b{res}')
                x_uni, _ = uni_block(x_uni, None, **block_kwargs)
        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.uni_b4(x_uni, None, cmap)
        return x
#---------------------------------------------------------------------------- | 49,430 | 50.544317 | 199 | py |
DFMGAN | DFMGAN-main/training/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# empty
| 435 | 42.6 | 76 | py |
DFMGAN | DFMGAN-main/training/training_loop.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from re import L
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
import itertools
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, random_seed=0):
    """Choose sample indices for a snapshot grid.

    Returns ((gw, gh), images, labels) where gw/gh are the grid dimensions
    derived from the dataset's image size, and images/labels are stacked
    samples drawn from `training_set`.
    """
    rnd = np.random.RandomState(random_seed)
    gw = np.clip(2560 // training_set.image_shape[2], 7, 32)
    gh = np.clip(2048 // training_set.image_shape[1], 4, 32)

    if not training_set.has_labels:
        # Unlabeled: a random permutation, repeated if the grid needs more cells.
        pool = list(range(len(training_set)))
        rnd.shuffle(pool)
        grid_indices = [pool[i % len(pool)] for i in range(gw * gh)]
    else:
        # Labeled: bucket samples by label so each grid row shows one label.
        buckets = {}  # label tuple -> [idx, ...]
        for idx in range(len(training_set)):
            key = tuple(training_set.get_details(idx).raw_label.flat[::-1])
            buckets.setdefault(key, []).append(idx)
        ordered_labels = sorted(buckets.keys())
        for key in ordered_labels:
            rnd.shuffle(buckets[key])
        # Fill row y with the label at position y (cycling), rotating each
        # bucket by gw so later rows of the same label show fresh samples.
        grid_indices = []
        for y in range(gh):
            key = ordered_labels[y % len(ordered_labels)]
            indices = buckets[key]
            grid_indices += [indices[x % len(indices)] for x in range(gw)]
            buckets[key] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]

    samples = [training_set[i] for i in grid_indices]
    images, labels = zip(*samples)
    return (gw, gh), np.stack(images), np.stack(labels)
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size):
    """Tile a batch [N, C, H, W] into a gw-by-gh grid and write it to `fname`.

    Pixel values are linearly mapped from `drange = (lo, hi)` to [0, 255];
    only 1-channel (grayscale) and 3-channel (RGB) data are supported.
    """
    lo, hi = drange
    grid = np.asarray(img, dtype=np.float32)
    grid = (grid - lo) * (255 / (hi - lo))      # Map drange -> [0, 255].
    grid = np.rint(grid).clip(0, 255).astype(np.uint8)

    gw, gh = grid_size
    _N, C, H, W = grid.shape
    grid = grid.reshape(gh, gw, C, H, W)
    grid = grid.transpose(0, 3, 1, 4, 2)        # -> (gh, H, gw, W, C)
    grid = grid.reshape(gh * H, gw * W, C)

    assert C in [1, 3]
    if C == 1:
        PIL.Image.fromarray(grid[:, :, 0], 'L').save(fname)
    if C == 3:
        PIL.Image.fromarray(grid, 'RGB').save(fname)
#----------------------------------------------------------------------------
def training_loop(
    run_dir                 = '.',      # Output directory.
    training_set_kwargs     = {},       # Options for training set.
    data_loader_kwargs      = {},       # Options for torch.utils.data.DataLoader.
    G_kwargs                = {},       # Options for generator network.
    D_kwargs                = {},       # Options for discriminator network.
    G_opt_kwargs            = {},       # Options for generator optimizer.
    D_opt_kwargs            = {},       # Options for discriminator optimizer.
    augment_kwargs          = None,     # Options for augmentation pipeline. None = disable.
    loss_kwargs             = {},       # Options for loss function.
    metrics                 = [],       # Metrics to evaluate during training.
    random_seed             = 0,        # Global random seed.
    num_gpus                = 1,        # Number of GPUs participating in the training.
    rank                    = 0,        # Rank of the current process in [0, num_gpus[.
    batch_size              = 4,        # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
    batch_gpu               = 4,        # Number of samples processed at a time by one GPU.
    ema_kimg                = 10,       # Half-life of the exponential moving average (EMA) of generator weights.
    ema_rampup              = None,     # EMA ramp-up coefficient.
    G_reg_interval          = 4,        # How often to perform regularization for G? None = disable lazy regularization.
    D_reg_interval          = 16,       # How often to perform regularization for D? None = disable lazy regularization.
    augment_p               = 0,        # Initial value of augmentation probability.
    ada_target              = None,     # ADA target value. None = fixed p.
    ada_interval            = 4,        # How often to perform ADA adjustment?
    ada_kimg                = 500,      # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
    total_kimg              = 25000,    # Total length of the training, measured in thousands of real images.
    kimg_per_tick           = 4,        # Progress snapshot interval.
    image_snapshot_ticks    = 50,       # How often to save image snapshots? None = disable.
    network_snapshot_ticks  = 50,       # How often to save network snapshots? None = disable.
    resume_pkl              = None,     # Network pickle to resume training from.
    cudnn_benchmark         = True,     # Enable torch.backends.cudnn.benchmark?
    allow_tf32              = False,    # Enable torch.backends.cuda.matmul.allow_tf32 and torch.backends.cudnn.allow_tf32?
    abort_fn                = None,     # Callback function for determining whether to abort training. Must return consistent results across ranks.
    progress_fn             = None,     # Callback function for updating training progress. Called for all ranks.
    # DFMGAN args
    ft                      = None,     # Which G parameters to train; values used below: 'default', 'ft_map', 'ft_syn', 'ft_syn_N', 'ft_map_syn_N', 'transfer'.
    D_match_kwargs          = {},       # Options for the defect-matching discriminator (transfer == 'res_block_match_dis' only).
    D_match_opt_kwargs      = {},       # Options for the defect-matching discriminator optimizer.
    D_match_reg_interval    = 16,       # Lazy-regularization interval for the defect-matching discriminator.
):
    """Main DFMGAN training loop (StyleGAN2-ADA loop extended with defect transfer).

    Runs one process of a (possibly multi-GPU) training job: builds the data
    pipeline, networks and optimizers, then alternates G/D (and optionally
    D_match) phases until `total_kimg` thousand real images have been shown.
    Periodically writes image grids, network pickles, metrics, and stats to
    `run_dir`. The DFMGAN-specific `G_kwargs.transfer` modes thread an extra
    `defect_z` latent (and, for the *_match_dis / *_uni_dis modes, a defect
    mask channel from the dataset) through the whole loop.
    """
    # Initialize.
    start_time = time.time()
    device = torch.device('cuda', rank)
    np.random.seed(random_seed * num_gpus + rank)
    torch.manual_seed(random_seed * num_gpus + rank)
    torch.backends.cudnn.benchmark = cudnn_benchmark    # Improves training speed.
    torch.backends.cuda.matmul.allow_tf32 = allow_tf32  # Allow PyTorch to internally use tf32 for matmul
    torch.backends.cudnn.allow_tf32 = allow_tf32        # Allow PyTorch to internally use tf32 for convolutions
    conv2d_gradfix.enabled = True                       # Improves training speed.
    grid_sample_gradfix.enabled = True                  # Avoids errors with the augmentation pipe.

    # Load training set.
    if rank == 0:
        print('Loading training set...')
    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
    training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
    training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
    if rank == 0:
        print()
        print('Num images: ', len(training_set))
        print('Image shape:', training_set.image_shape)
        print('Label shape:', training_set.label_shape)
        print()

    # Construct networks.
    if rank == 0:
        print('Constructing networks...')
    common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    G_ema = copy.deepcopy(G).eval()
    if G_kwargs.transfer == 'res_block_match_dis':
        # The matching discriminator sees image + mask concatenated, hence +1 channel.
        common_match_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels + 1)
        D_match = dnnlib.util.construct_class_by_name(**D_match_kwargs, **common_match_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module

    # Resume from existing pickle. NOTE(review): weights are loaded on rank 0
    # only; presumably the other ranks receive them via DDP broadcast — confirm.
    if (resume_pkl is not None) and (rank == 0):
        print(f'Resuming from "{resume_pkl}"')
        with dnnlib.util.open_url(resume_pkl) as f:
            resume_data = legacy.load_network_pkl(f)
        for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
            copied_list = misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
            # if rank == 0:
            #     print('%s copied parameters and buffers:' % name)
            #     print(copied_list)

    # Print network summary tables.
    if rank == 0:
        z = torch.empty([batch_gpu, G.z_dim], device=device)
        c = torch.empty([batch_gpu, G.c_dim], device=device)
        input_list = [z, c]
        if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            defect_z = torch.empty([batch_gpu, G.z_dim], device=device)
            input_list.append(defect_z)
        if G_kwargs.transfer == 'res_block_match_dis':
            # Extra flag asks G to also return the defect mask.
            input_list.append(True)
            img, mask = misc.print_module_summary(G, input_list)
            misc.print_module_summary(D, [img, c])
            misc.print_module_summary(D_match, [torch.cat([img, mask], dim = 1), c])
        elif G_kwargs.transfer == 'res_block_uni_dis':
            input_list.append(True)
            img, mask = misc.print_module_summary(G, input_list)
            misc.print_module_summary(D, [img, mask, c])
        else:
            img = misc.print_module_summary(G, input_list)
            misc.print_module_summary(D, [img, c])

    # Setup augmentation.
    if rank == 0:
        print('Setting up augmentation...')
    augment_pipe = None
    ada_stats = None
    if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
        augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
        augment_pipe.p.copy_(torch.as_tensor(augment_p))
        if ada_target is not None:
            ada_stats = training_stats.Collector(regex='Loss/signs/real')

    # Distribute across GPUs.
    if rank == 0:
        print(f'Distributing across {num_gpus} GPUs...')
    ddp_modules = dict()
    module_list = [('G_mapping', G.mapping), ('G_synthesis', G.synthesis), ('D', D), (None, G_ema), ('augment_pipe', augment_pipe)]
    if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
        module_list.append(('G_defect_mapping', G.defect_mapping))
    if G_kwargs.transfer == 'res_block_match_dis':
        module_list.append(('D_match', D_match))
    for name, module in module_list:
        if (num_gpus > 1) and (module is not None) and len(list(module.parameters())) != 0:
            # Grads are toggled on only while constructing DDP; training phases
            # re-enable them per phase below.
            module.requires_grad_(True)
            module = torch.nn.parallel.DistributedDataParallel(module, device_ids=[device], broadcast_buffers=False, find_unused_parameters = True)
            module.requires_grad_(False)
        if name is not None:
            ddp_modules[name] = module

    # Setup training phases.
    if rank == 0:
        print('Setting up training phases...')
    loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
    phases = []
    training_nets = [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]
    if G_kwargs.transfer == 'res_block_match_dis':
        training_nets.append(('D_match', D_match, D_match_opt_kwargs, D_match_reg_interval))
    for name, module, opt_kwargs, reg_interval in training_nets:
        # Select which subset of the module's parameters this optimizer trains.
        num_param = 0
        if name == 'D':
            if G_kwargs.transfer == 'res_block_uni_dis':
                for res in [4, 8, 16, 32, 64, 128, 256]:
                    # NOTE(review): target_param is re-initialized on every
                    # iteration of this loop, so only the last resolution's
                    # parameter group survives into the optimizer even though
                    # num_param accumulates across all resolutions — confirm
                    # this is intended.
                    target_param = []
                    if res > D_kwargs.uni_st:
                        num_param += sum([p.numel() for p in getattr(module, f'mask_b{res}').parameters()])
                        target_param.append(getattr(module, f'mask_b{res}').parameters())
                    else:
                        num_param += sum([p.numel() for p in getattr(module, f'uni_b{res}').parameters()])
                        target_param.append(getattr(module, f'uni_b{res}').parameters())
                target_param = itertools.chain(*target_param)
            else:
                num_param = sum([p.numel() for p in module.parameters()])
                target_param = module.parameters()
        elif name == 'D_match':
            num_param = sum([p.numel() for p in module.parameters()])
            target_param = module.parameters()
        elif name == 'G':
            if ft == 'default':
                # Train all of G.
                num_param = sum([p.numel() for p in module.parameters()])
                target_param = module.parameters()
            elif ft == 'ft_map':
                # Train the mapping network only.
                num_param = sum([p.numel() for p in module.mapping.parameters()])
                target_param = module.mapping.parameters()
            elif ft == 'ft_syn':
                # Train the synthesis network only.
                num_param = sum([p.numel() for p in module.synthesis.parameters()])
                target_param = module.synthesis.parameters()
            elif ft.startswith('ft_syn_'):
                # Train the first N synthesis blocks (lowest resolutions first).
                num_trainable_block = int(ft.split('_')[-1])
                syn_modules = [module.synthesis.b4, module.synthesis.b8, module.synthesis.b16, module.synthesis.b32, module.synthesis.b64, module.synthesis.b128, module.synthesis.b256]
                # The chain is consumed by the numel() sum, so it is rebuilt afterwards.
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]])
                num_param = sum([p.numel() for p in target_param])
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]])
            elif ft.startswith('ft_map_syn_'):
                # Train the first N synthesis blocks plus the mapping network.
                num_trainable_block = int(ft.split('_')[-1])
                syn_modules = [module.synthesis.b4, module.synthesis.b8, module.synthesis.b16, module.synthesis.b32, module.synthesis.b64, module.synthesis.b128, module.synthesis.b256]
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]], module.mapping.parameters())
                num_param = sum([p.numel() for p in target_param])
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]], module.mapping.parameters())
            elif ft == 'transfer':
                if G_kwargs.transfer == 'dual_mod':
                    # Train only the defect mapping network.
                    target_param = module.defect_mapping.parameters()
                    num_param = sum([p.numel() for p in target_param])
                    target_param = module.defect_mapping.parameters()
                elif G_kwargs.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    # Train the defect mapping plus the residual blocks at and
                    # above the configured starting resolution.
                    target_param = [module.defect_mapping.parameters()]
                    num_param += sum([p.numel() for p in module.defect_mapping.parameters()])
                    for res in [4, 8, 16, 32, 64, 128, 256]:
                        if res >= G_kwargs.synthesis_kwargs.res_st:
                            target_param.append(getattr(module.synthesis, f'res_b{res}').parameters())
                            num_param += sum([p.numel() for p in getattr(module.synthesis, f'res_b{res}').parameters()])
                    target_param = itertools.chain(*target_param)
        if rank == 0:
            print('Training %d params of %s' % (num_param, name))
        if reg_interval is None:
            opt = dnnlib.util.construct_class_by_name(params=target_param, **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
        else: # Lazy regularization.
            # Scale lr/betas so the combined main+reg schedule matches the
            # non-lazy equivalent (StyleGAN2 lazy-regularization trick).
            mb_ratio = reg_interval / (reg_interval + 1)
            opt_kwargs = dnnlib.EasyDict(opt_kwargs)
            opt_kwargs.lr = opt_kwargs.lr * mb_ratio
            opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
            opt = dnnlib.util.construct_class_by_name(target_param, **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
            phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
    for phase in phases:
        phase.start_event = None
        phase.end_event = None
        if rank == 0:
            # CUDA events used to time each phase for the stats report.
            phase.start_event = torch.cuda.Event(enable_timing=True)
            phase.end_event = torch.cuda.Event(enable_timing=True)

    # Export sample images.
    grid_size = None
    grid_z = None
    grid_c = None
    grid_defect_z = None
    if rank == 0:
        print('Exporting sample images...')
        grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
        if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
            # Drop the mask channel before saving the "reals" preview grid.
            images = images[:, :3, :, :]
        save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
        # Fixed latents/labels reused for every snapshot so grids are comparable.
        grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
        grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
        if G_kwargs.transfer == 'none':
            images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
        elif G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            grid_defect_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
            images = torch.cat([G_ema(z=z, c=c, defect_z=defect_z, noise_mode='const').cpu() for z, c, defect_z in zip(grid_z, grid_c, grid_defect_z)]).numpy()
        save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)

    # Initialize logs.
    if rank == 0:
        print('Initializing logs...')
    stats_collector = training_stats.Collector(regex='.*')
    stats_metrics = dict()
    stats_jsonl = None
    stats_tfevents = None
    if rank == 0:
        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
        try:
            import torch.utils.tensorboard as tensorboard
            stats_tfevents = tensorboard.SummaryWriter(run_dir)
        except ImportError as err:
            print('Skipping tfevents export:', err)

    # Train.
    if rank == 0:
        print(f'Training for {total_kimg} kimg...')
        print()
    cur_nimg = 0
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = tick_start_time - start_time
    batch_idx = 0
    if progress_fn is not None:
        progress_fn(0, total_kimg)
    while True:

        # Fetch training data.
        with torch.autograd.profiler.record_function('data_fetch'):
            phase_real_img, phase_real_c = next(training_set_iterator)
            if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                # Dataset delivers RGB + mask packed as a 4-channel image.
                assert phase_real_img.ndim == 4 and phase_real_img.shape[1] == 4
                phase_real_img, phase_real_mask = phase_real_img[:, :3, :, :], phase_real_img[:, 3:, :, :]
            # uint8 [0,255] -> float [-1,1], then split into per-GPU chunks.
            phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
            if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                phase_real_mask = (phase_real_mask.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
            phase_real_c = phase_real_c.to(device).split(batch_gpu)
            all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
            all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
            all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]
            all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
            all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
            # Independent latent for the defect branch (DFMGAN).
            all_gen_defect_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
            all_gen_defect_z = [phase_gen_defect_z.split(batch_gpu) for phase_gen_defect_z in all_gen_defect_z.split(batch_size)]

        # Execute training phases.
        if G_kwargs.transfer == 'none':
            zip_iter = zip(phases, all_gen_z, all_gen_c)
        elif G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            zip_iter = zip(phases, all_gen_z, all_gen_c, all_gen_defect_z)
        for iter_cmbn in zip_iter:
            phase, phase_gen_z, phase_gen_c = iter_cmbn[:3]
            if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                phase_gen_defect_z = iter_cmbn[3]
            if batch_idx % phase.interval != 0:
                continue

            # Initialize gradient accumulation.
            if phase.start_event is not None:
                phase.start_event.record(torch.cuda.current_stream(device))
            phase.opt.zero_grad(set_to_none=True)
            phase.module.requires_grad_(True)

            # Accumulate gradients over multiple rounds.
            if G_kwargs.transfer == 'none':
                enum_iter = enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c))
            elif G_kwargs.transfer in ['dual_mod', 'res_block']:
                enum_iter = enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c, phase_gen_defect_z))
            elif G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                enum_iter = enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c, phase_gen_defect_z, phase_real_mask))
            for round_idx, iter_cmbn_2 in enum_iter:
                real_img, real_c, gen_z, gen_c = iter_cmbn_2[:4]
                gen_defect_z = None
                real_mask = None
                gen_defect_z2 = None
                if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    gen_defect_z = iter_cmbn_2[4]
                if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                    real_mask = iter_cmbn_2[5]
                # Sync gradients across GPUs only on the last accumulation round.
                sync = (round_idx == batch_size // (batch_gpu * num_gpus) - 1)
                gain = phase.interval
                loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, sync=sync, gain=gain,
                    gen_defect_z = gen_defect_z, real_mask = real_mask, mask_threshold = G_kwargs.mask_threshold)

            # Update weights.
            phase.module.requires_grad_(False)
            with torch.autograd.profiler.record_function(phase.name + '_opt'):
                for param in phase.module.parameters():
                    if param.grad is not None:
                        # Guard against NaN/Inf gradients destabilizing training.
                        misc.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
                phase.opt.step()
            if phase.end_event is not None:
                phase.end_event.record(torch.cuda.current_stream(device))

        # Update G_ema.
        with torch.autograd.profiler.record_function('Gema'):
            ema_nimg = ema_kimg * 1000
            if ema_rampup is not None:
                ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
            ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
            for p_ema, p in zip(G_ema.parameters(), G.parameters()):
                p_ema.copy_(p.lerp(p_ema, ema_beta))
            for b_ema, b in zip(G_ema.buffers(), G.buffers()):
                b_ema.copy_(b)

        # Update state.
        cur_nimg += batch_size
        batch_idx += 1

        # Execute ADA heuristic.
        if (ada_stats is not None) and (batch_idx % ada_interval == 0):
            ada_stats.update()
            adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
            augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))

        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
            continue

        # Print status line, accumulating the same information in stats_collector.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
        training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
        training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
        if rank == 0:
            print(' '.join(fields))

        # Check for abort.
        if (not done) and (abort_fn is not None) and abort_fn():
            done = True
            if rank == 0:
                print()
                print('Aborting...')

        # Save image snapshot.
        if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
            if G_kwargs.transfer == 'none':
                images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
            elif G_kwargs.transfer == 'dual_mod':
                images = torch.cat([G_ema(z=z, c=c, defect_z=defect_z, noise_mode='const').cpu() for z, c, defect_z in zip(grid_z, grid_c, grid_defect_z)]).numpy()
            elif G_kwargs.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                images, masks = [], []
                for z, c, defect_z in zip(grid_z, grid_c, grid_defect_z):
                    image, mask = G_ema(z=z, c=c, defect_z=defect_z, output_mask = True, noise_mode='const')
                    images.append(image.cpu())
                    masks.append(mask.cpu())
                images = torch.cat(images).numpy()
                masks = torch.cat(masks).numpy()
                # Save both the raw (continuous) and thresholded (binary) masks.
                save_image_grid(masks, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_mask.png'), drange=[-1,1], grid_size=grid_size)
                masks[masks >= G_kwargs.mask_threshold] = 1.0
                masks[masks < G_kwargs.mask_threshold] = -1.0
                save_image_grid(masks, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_rounded_mask.png'), drange=[-1,1], grid_size=grid_size)
            save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_img.png'), drange=[-1,1], grid_size=grid_size)

        # Save network snapshot.
        snapshot_pkl = None
        snapshot_data = None
        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
            snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs))
            saving_modules = [('G', G), ('D', D), ('G_ema', G_ema), ('augment_pipe', augment_pipe)]
            if G_kwargs.transfer == 'res_block_match_dis':
                saving_modules.append(('D_match', D_match))
            for name, module in saving_modules:
                if module is not None:
                    if num_gpus > 1:
                        misc.check_ddp_consistency(module, ignore_regex=r'.*\.w_avg')
                    # Snapshot a CPU copy so the pickle is device-independent.
                    module = copy.deepcopy(module).eval().requires_grad_(False).cpu()
                snapshot_data[name] = module
                del module # conserve memory
            snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
            if rank == 0:
                with open(snapshot_pkl, 'wb') as f:
                    pickle.dump(snapshot_data, f)

        # Evaluate metrics.
        if (snapshot_data is not None) and (len(metrics) > 0):
            if rank == 0:
                print('Evaluating metrics...')
            for metric in metrics:
                result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
                    dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                if rank == 0:
                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                stats_metrics.update(result_dict.results)
        del snapshot_data # conserve memory

        # Collect statistics.
        for phase in phases:
            value = []
            if (phase.start_event is not None) and (phase.end_event is not None):
                phase.end_event.synchronize()
                value = phase.start_event.elapsed_time(phase.end_event)
            training_stats.report0('Timing/' + phase.name, value)
        stats_collector.update()
        stats_dict = stats_collector.as_dict()

        # Update logs.
        timestamp = time.time()
        if stats_jsonl is not None:
            fields = dict(stats_dict, timestamp=timestamp)
            stats_jsonl.write(json.dumps(fields) + '\n')
            stats_jsonl.flush()
        if stats_tfevents is not None:
            global_step = int(cur_nimg / 1e3)
            walltime = timestamp - start_time
            for name, value in stats_dict.items():
                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
            for name, value in stats_metrics.items():
                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
            stats_tfevents.flush()
        if progress_fn is not None:
            progress_fn(cur_nimg // 1000, total_kimg)

        # Update state.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = tick_start_time - tick_end_time
        if done:
            break

    # Done.
    if rank == 0:
        print()
        print('Exiting...')
| 31,481 | 53.27931 | 184 | py |
DFMGAN | DFMGAN-main/torch_utils/custom_ops.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import glob
import torch
import torch.utils.cpp_extension
import importlib
import hashlib
import shutil
from pathlib import Path
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.

verbosity = 'brief' # Verbosity level for get_plugin() status output: 'none', 'brief', 'full'.
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
patterns = [
'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[-1]
return None
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
# Process-wide cache of already-built plugins: module_name => loaded module.
_cached_plugins = dict()

def get_plugin(module_name, sources, **build_kwargs):
    """Compile (if needed) and import a custom C++/CUDA PyTorch extension.

    Args:
        module_name:  Name under which the compiled extension is importable.
        sources:      Paths to the .cpp/.cu source files.
        **build_kwargs: Extra arguments forwarded to torch.utils.cpp_extension.load().

    Returns:
        The imported extension module. Results are cached per process, so
        repeated calls with the same module_name are cheap.
    """
    assert verbosity in ['none', 'brief', 'full']

    # Already cached?
    if module_name in _cached_plugins:
        return _cached_plugins[module_name]

    # Print status.
    if verbosity == 'full':
        print(f'Setting up PyTorch plugin "{module_name}"...')
    elif verbosity == 'brief':
        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)

    try: # pylint: disable=too-many-nested-blocks
        # Make sure we can find the necessary compiler binaries.
        if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
            compiler_bindir = _find_compiler_bindir()
            if compiler_bindir is None:
                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
            os.environ['PATH'] += ';' + compiler_bindir

        # Compile and load.
        verbose_build = (verbosity == 'full')

        # Incremental build md5sum trickery. Copies all the input source files
        # into a cached build directory under a combined md5 digest of the input
        # source files. Copying is done only if the combined digest has changed.
        # This keeps input file timestamps and filenames the same as in previous
        # extension builds, allowing for fast incremental rebuilds.
        #
        # This optimization is done only in case all the source files reside in
        # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
        # environment variable is set (we take this as a signal that the user
        # actually cares about this.)
        source_dirs_set = set(os.path.dirname(source) for source in sources)
        if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ):
            all_source_files = sorted(list(x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file()))

            # Compute a combined hash digest for all source files in the same
            # custom op directory (usually .cu, .cpp, .py and .h files).
            hash_md5 = hashlib.md5()
            for src in all_source_files:
                with open(src, 'rb') as f:
                    hash_md5.update(f.read())
            build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
            digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest())

            if not os.path.isdir(digest_build_dir):
                os.makedirs(digest_build_dir, exist_ok=True)
                # File-based lock so concurrent processes don't race on the copy.
                baton = FileBaton(os.path.join(digest_build_dir, 'lock'))
                if baton.try_acquire():
                    try:
                        for src in all_source_files:
                            shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src)))
                    finally:
                        baton.release()
                else:
                    # Someone else is copying source files under the digest dir,
                    # wait until done and continue.
                    baton.wait()
            digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources]
            torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir,
                verbose=verbose_build, sources=digest_sources, **build_kwargs)
        else:
            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
        module = importlib.import_module(module_name)
    except:
        # Re-raise after flagging the failure on the single-line status output.
        if verbosity == 'brief':
            print('Failed!')
        raise

    # Print status and add to cache.
    if verbosity == 'full':
        print(f'Done setting up PyTorch plugin "{module_name}".')
    elif verbosity == 'brief':
        print('Done.')
    _cached_plugins[module_name] = module
    return module
#----------------------------------------------------------------------------
| 5,644 | 43.448819 | 146 | py |
DFMGAN | DFMGAN-main/torch_utils/training_stats.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for reporting and collecting training statistics across
multiple processes and devices. The interface is designed to minimize
synchronization overhead as well as the amount of boilerplate in user
code."""
import re
import numpy as np
import torch
import dnnlib
from . import misc
#----------------------------------------------------------------------------
_num_moments    = 3             # Moments tracked per statistic: [num_scalars, sum_of_scalars, sum_of_squares].
_reduce_dtype   = torch.float32 # Data type to use for initial per-tensor reduction.
_counter_dtype  = torch.float64 # Data type to use for the internal counters.
_rank           = 0             # Rank of the current process.
_sync_device    = None          # Device to use for multiprocess communication. None = single-process.
_sync_called    = False         # Has _sync() been called yet?
_counters       = dict()        # Running counters on each device, updated by report(): name => device => torch.Tensor
_cumulative     = dict()        # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
#----------------------------------------------------------------------------
def init_multiprocessing(rank, sync_device):
    r"""Configure `torch_utils.training_stats` for multi-process collection.

    Call this after `torch.distributed.init_process_group()` and before the
    first `Collector.update()`. Single-process runs may skip it entirely.

    Args:
        rank:        Rank of the current process.
        sync_device: PyTorch device for inter-process communication
                     (typically `torch.device('cuda', rank)`), or None to
                     disable multi-process collection.
    """
    global _rank, _sync_device
    # Changing the rank/device after a sync has happened would corrupt the
    # accumulated counters, so forbid it.
    assert not _sync_called
    _rank = rank
    _sync_device = sync_device
#----------------------------------------------------------------------------
@misc.profiled_function
def report(name, value):
    r"""Broadcasts the given set of scalars to all interested instances of
    `Collector`, across device and process boundaries.
    This function is expected to be extremely cheap and can be safely
    called from anywhere in the training loop, loss function, or inside a
    `torch.nn.Module`.
    Warning: The current implementation expects the set of unique names to
    be consistent across processes. Please make sure that `report()` is
    called at least once for each unique name by each process, and in the
    same order. If a given process has no scalars to broadcast, it can do
    `report(name, [])` (empty list).
    Args:
        name:   Arbitrary string specifying the name of the statistic.
                Averages are accumulated separately for each unique name.
        value:  Arbitrary set of scalars. Can be a list, tuple,
                NumPy array, PyTorch tensor, or Python scalar.
    Returns:
        The same `value` that was passed in.
    """
    # Register the name even when value is empty, so the set of known names
    # stays consistent across processes (see the warning above).
    if name not in _counters:
        _counters[name] = dict()

    elems = torch.as_tensor(value)
    if elems.numel() == 0:
        return value

    # Reduce to the three tracked moments (count, sum, sum of squares) on the
    # tensor's own device; cross-device/process reduction happens in _sync().
    elems = elems.detach().flatten().to(_reduce_dtype)
    moments = torch.stack([
        torch.ones_like(elems).sum(),
        elems.sum(),
        elems.square().sum(),
    ])
    assert moments.ndim == 1 and moments.shape[0] == _num_moments
    moments = moments.to(_counter_dtype)

    # Accumulate into the per-device running counter for this name.
    device = moments.device
    if device not in _counters[name]:
        _counters[name][device] = torch.zeros_like(moments)
    _counters[name][device].add_(moments)
    return value
#----------------------------------------------------------------------------
def report0(name, value):
    r"""Broadcast the given scalars on behalf of the first process only
    (`rank = 0`); all other processes contribute an empty set.

    See `report()` for further details.
    """
    report(name, [] if _rank != 0 else value)
    return value
#----------------------------------------------------------------------------
class Collector:
    r"""Collects the scalars broadcasted by `report()` and `report0()` and
    computes their long-term averages (mean and standard deviation) over
    user-defined periods of time.

    The averages are first collected into internal counters that are not
    directly visible to the user. They are then copied to the user-visible
    state as a result of calling `update()` and can then be queried using
    `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
    internal counters for the next round, so that the user-visible state
    effectively reflects averages collected between the last two calls to
    `update()`.

    Args:
        regex:          Regular expression defining which statistics to
                        collect. The default is to collect everything.
        keep_previous:  Whether to retain the previous averages if no
                        scalars were collected on a given round
                        (default: True).
    """
    def __init__(self, regex='.*', keep_previous=True):
        self._regex = re.compile(regex)
        self._keep_previous = keep_previous
        # _cumulative: running [count, sum, sum-of-squares] per name, over
        # the whole lifetime of this collector.
        self._cumulative = dict()
        # _moments: per-name deltas between the last two update() calls.
        self._moments = dict()
        # Flush any counters accumulated before this collector existed, so
        # the first real round starts from a clean baseline.
        self.update()
        self._moments.clear()

    def names(self):
        r"""Returns the names of all statistics broadcasted so far that
        match the regular expression specified at construction time.
        """
        return [name for name in _counters if self._regex.fullmatch(name)]

    def update(self):
        r"""Copies current values of the internal counters to the
        user-visible state and resets them for the next round.

        If `keep_previous=True` was specified at construction time, the
        operation is skipped for statistics that have received no scalars
        since the last update, retaining their previous averages.

        This method performs a number of GPU-to-CPU transfers and one
        `torch.distributed.all_reduce()`. It is intended to be called
        periodically in the main training loop, typically once every
        N training steps.
        """
        if not self._keep_previous:
            self._moments.clear()
        for name, cumulative in _sync(self.names()):
            if name not in self._cumulative:
                self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
            # The delta against the previous cumulative snapshot is exactly
            # what was collected during this round.
            delta = cumulative - self._cumulative[name]
            self._cumulative[name].copy_(cumulative)
            # delta[0] is the scalar count; only record rounds with data.
            if float(delta[0]) != 0:
                self._moments[name] = delta

    def _get_delta(self, name):
        r"""Returns the raw moments that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        assert self._regex.fullmatch(name)
        if name not in self._moments:
            self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        return self._moments[name]

    def num(self, name):
        r"""Returns the number of scalars that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        return int(delta[0])

    def mean(self, name):
        r"""Returns the mean of the scalars that were accumulated for the
        given statistic between the last two calls to `update()`, or NaN if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0:
            return float('nan')
        return float(delta[1] / delta[0])

    def std(self, name):
        r"""Returns the standard deviation of the scalars that were
        accumulated for the given statistic between the last two calls to
        `update()`, or NaN if no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
            return float('nan')
        if int(delta[0]) == 1:
            return float(0)
        # Population std from the raw moments: sqrt(E[x^2] - E[x]^2),
        # clamped at 0 to guard against rounding.
        mean = float(delta[1] / delta[0])
        raw_var = float(delta[2] / delta[0])
        return np.sqrt(max(raw_var - np.square(mean), 0))

    def as_dict(self):
        r"""Returns the averages accumulated between the last two calls to
        `update()` as an `dnnlib.EasyDict`. The contents are as follows:

            dnnlib.EasyDict(
                NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
                ...
            )
        """
        stats = dnnlib.EasyDict()
        for name in self.names():
            stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
        return stats

    def __getitem__(self, name):
        r"""Convenience getter.
        `collector[name]` is a synonym for `collector.mean(name)`.
        """
        return self.mean(name)
#----------------------------------------------------------------------------
def _sync(names):
    r"""Synchronize the global cumulative counters across devices and
    processes. Called internally by `Collector.update()`.

    Returns a list of (name, cumulative_moments) pairs, where the moments
    tensor lives on the CPU. Resets the per-device counters as a side
    effect.
    """
    if len(names) == 0:
        return []
    global _sync_called
    _sync_called = True

    # Collect deltas within current rank: sum each name's per-device
    # counters onto a single device, zeroing them out as we go.
    deltas = []
    device = _sync_device if _sync_device is not None else torch.device('cpu')
    for name in names:
        delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
        for counter in _counters[name].values():
            delta.add_(counter.to(device))
            counter.copy_(torch.zeros_like(counter))
        deltas.append(delta)
    deltas = torch.stack(deltas)

    # Sum deltas across ranks. Requires every rank to pass the same names
    # in the same order, since all_reduce matches tensors positionally.
    if _sync_device is not None:
        torch.distributed.all_reduce(deltas)

    # Update cumulative values.
    deltas = deltas.cpu()
    for idx, name in enumerate(names):
        if name not in _cumulative:
            _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        _cumulative[name].add_(deltas[idx])

    # Return name-value pairs.
    return [(name, _cumulative[name]) for name in names]
#----------------------------------------------------------------------------
| 10,707 | 38.806691 | 118 | py |
DFMGAN | DFMGAN-main/torch_utils/persistence.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for pickling Python code alongside other data.
The pickled code is automatically imported into a separate Python module
during unpickling. This way, any previously exported pickles will remain
usable even if the original code is no longer available, or if the current
version of the code is not consistent with what was originally pickled."""
import sys
import pickle
import io
import inspect
import copy
import uuid
import types
import dnnlib
#----------------------------------------------------------------------------
_version = 6 # internal version number
_decorators = set() # {decorator_class, ...}
_import_hooks = [] # [hook_function, ...]
_module_to_src_dict = dict() # {module: src, ...}
_src_to_module_dict = dict() # {src: module, ...}
#----------------------------------------------------------------------------
def persistent_class(orig_class):
    r"""Class decorator that extends a given class to save its source code
    when pickled.

    Example:

        from torch_utils import persistence

        @persistence.persistent_class
        class MyNetwork(torch.nn.Module):
            def __init__(self, num_inputs, num_outputs):
                super().__init__()
                self.fc = MyLayer(num_inputs, num_outputs)
                ...

        @persistence.persistent_class
        class MyLayer(torch.nn.Module):
            ...

    When pickled, any instance of `MyNetwork` and `MyLayer` will save its
    source code alongside other internal state (e.g., parameters, buffers,
    and submodules). This way, any previously exported pickle will remain
    usable even if the class definitions have been modified or are no
    longer available.

    The decorator saves the source code of the entire Python module
    containing the decorated class. It does *not* save the source code of
    any imported modules. Thus, the imported modules must be available
    during unpickling, also including `torch_utils.persistence` itself.

    It is ok to call functions defined in the same module from the
    decorated class. However, if the decorated class depends on other
    classes defined in the same module, they must be decorated as well.
    This is illustrated in the above example in the case of `MyLayer`.

    It is also possible to employ the decorator just-in-time before
    calling the constructor. For example:

        cls = MyLayer
        if want_to_make_it_persistent:
            cls = persistence.persistent_class(cls)
        layer = cls(num_inputs, num_outputs)

    As an additional feature, the decorator also keeps track of the
    arguments that were used to construct each instance of the decorated
    class. The arguments can be queried via `obj.init_args` and
    `obj.init_kwargs`, and they are automatically pickled alongside other
    object state. A typical use case is to first unpickle a previous
    instance of a persistent class, and then upgrade it to use the latest
    version of the source code:

        with open('old_pickle.pkl', 'rb') as f:
            old_net = pickle.load(f)
        new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
        misc.copy_params_and_buffers(old_net, new_net, require_all=True)
    """
    assert isinstance(orig_class, type)
    # Decorating twice would be harmless but wasteful; return the existing
    # decorator class as-is.
    if is_persistent(orig_class):
        return orig_class

    assert orig_class.__module__ in sys.modules
    orig_module = sys.modules[orig_class.__module__]
    # Snapshot the module source now, at decoration time.
    orig_module_src = _module_to_src(orig_module)

    class Decorator(orig_class):
        # Stored on the class so __reduce__ can embed them in the pickle.
        _orig_module_src = orig_module_src
        _orig_class_name = orig_class.__name__

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Remember construction arguments for init_args/init_kwargs.
            self._init_args = copy.deepcopy(args)
            self._init_kwargs = copy.deepcopy(kwargs)
            assert orig_class.__name__ in orig_module.__dict__
            # Fail fast at construction time if the object cannot be pickled.
            _check_pickleable(self.__reduce__())

        @property
        def init_args(self):
            return copy.deepcopy(self._init_args)

        @property
        def init_kwargs(self):
            return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))

        def __reduce__(self):
            # Rewrite the standard reduce tuple so that unpickling goes
            # through _reconstruct_persistent_obj with the embedded source.
            fields = list(super().__reduce__())
            fields += [None] * max(3 - len(fields), 0)
            if fields[0] is not _reconstruct_persistent_obj:
                meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
                fields[0] = _reconstruct_persistent_obj # reconstruct func
                fields[1] = (meta,) # reconstruct args
                fields[2] = None # state dict
            return tuple(fields)

    Decorator.__name__ = orig_class.__name__
    _decorators.add(Decorator)
    return Decorator
#----------------------------------------------------------------------------
def is_persistent(obj):
    r"""Test whether the given object or class is persistent, i.e. whether
    it will save its source code when pickled.
    """
    # `obj` may itself be one of the decorator classes...
    try:
        direct_hit = obj in _decorators
    except TypeError:
        # Unhashable objects cannot be classes stored in the set.
        direct_hit = False
    # ...or an instance of one.
    return direct_hit or type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
#----------------------------------------------------------------------------
def import_hook(hook):
    r"""Register an import hook that is called whenever a persistent object
    is being unpickled. A typical use case is to patch the pickled source
    code to avoid errors and inconsistencies when the API of some imported
    module has changed.

    The hook should have the following signature:

        hook(meta) -> modified meta

    `meta` is an instance of `dnnlib.EasyDict` with the following fields:

        type:       Type of the persistent object, e.g. `'class'`.
        version:    Internal version number of `torch_utils.persistence`.
        module_src  Original source code of the Python module.
        class_name: Class name in the original Python module.
        state:      Internal state of the object.

    Example:

        @persistence.import_hook
        def wreck_my_network(meta):
            if meta.class_name == 'MyNetwork':
                print('MyNetwork is being imported. I will wreck it!')
                meta.module_src = meta.module_src.replace("True", "False")
            return meta
    """
    assert callable(hook)
    # Hooks run in registration order during unpickling.
    _import_hooks.append(hook)
#----------------------------------------------------------------------------
def _reconstruct_persistent_obj(meta):
    r"""Hook that is called internally by the `pickle` module to unpickle
    a persistent object.
    """
    # Normalize metadata and let registered import hooks patch it (e.g.
    # rewrite outdated source code) before reconstruction.
    meta = dnnlib.EasyDict(meta)
    meta.state = dnnlib.EasyDict(meta.state)
    for hook in _import_hooks:
        meta = hook(meta)
        assert meta is not None

    assert meta.version == _version
    # Re-materialize a module from the pickled source and look up the class.
    module = _src_to_module(meta.module_src)

    assert meta.type == 'class'
    orig_class = module.__dict__[meta.class_name]
    decorator_class = persistent_class(orig_class)
    # Bypass __init__; the pickled state is restored directly below.
    obj = decorator_class.__new__(decorator_class)

    setstate = getattr(obj, '__setstate__', None)
    if callable(setstate):
        setstate(meta.state) # pylint: disable=not-callable
    else:
        obj.__dict__.update(meta.state)
    return obj
#----------------------------------------------------------------------------
def _module_to_src(module):
    r"""Return the source code of the given Python module, caching the
    result for subsequent calls.
    """
    cached = _module_to_src_dict.get(module, None)
    if cached is not None:
        return cached
    src = inspect.getsource(module)
    # Register the mapping in both directions so _src_to_module() can reuse
    # the live module instead of re-executing the source.
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    return src
def _src_to_module(src):
    r"""Return a Python module holding the given source code, creating and
    executing it on first sight and reusing the cached module afterwards.
    """
    cached = _src_to_module_dict.get(src, None)
    if cached is not None:
        return cached
    # Give the synthetic module a unique name and register it in
    # sys.modules so that pickling/introspection of its contents works.
    module = types.ModuleType("_imported_module_" + uuid.uuid4().hex)
    sys.modules[module.__name__] = module
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    exec(src, module.__dict__) # pylint: disable=exec-used
    return module
#----------------------------------------------------------------------------
def _check_pickleable(obj):
r"""Check that the given object is pickleable, raising an exception if
it is not. This function is expected to be considerably more efficient
than actually pickling the object.
"""
def recurse(obj):
if isinstance(obj, (list, tuple, set)):
return [recurse(x) for x in obj]
if isinstance(obj, dict):
return [[recurse(x), recurse(y)] for x, y in obj.items()]
if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
return None # Python primitive types are pickleable.
if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor']:
return None # NumPy arrays and PyTorch tensors are pickleable.
if is_persistent(obj):
return None # Persistent objects are pickleable, by virtue of the constructor check.
return obj
with io.BytesIO() as f:
pickle.dump(recurse(obj), f)
#----------------------------------------------------------------------------
| 9,708 | 37.527778 | 144 | py |
DFMGAN | DFMGAN-main/torch_utils/misc.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
#----------------------------------------------------------------------------
# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
# same constant is used multiple times.
# Cache of constructed constant tensors, keyed by value bytes plus all
# construction options, so reusing the same constant avoids a fresh copy.
_constant_cache = dict()

def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return a constant tensor for `value`, cached across calls.

    `shape` (optional) broadcasts the value to the given shape; `dtype`,
    `device`, and `memory_format` default to the usual PyTorch defaults.
    Identical arguments return the very same tensor object.
    """
    value = np.asarray(value)
    shape = None if shape is None else tuple(shape)
    dtype = torch.get_default_dtype() if dtype is None else dtype
    device = torch.device('cpu') if device is None else device
    memory_format = torch.contiguous_format if memory_format is None else memory_format

    key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
    cached = _constant_cache.get(key, None)
    if cached is not None:
        return cached

    tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
    if shape is not None:
        # Broadcast against an empty tensor of the target shape.
        tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
    tensor = tensor.contiguous(memory_format=memory_format)
    _constant_cache[key] = tensor
    return tensor
#----------------------------------------------------------------------------
# Replace NaN/Inf with specified numerical values.
try:
    nan_to_num = torch.nan_to_num # 1.8.0a0
except AttributeError:
    # Fallback for PyTorch < 1.8: emulate torch.nan_to_num. Only supports
    # nan=0 (asserted below), which is all this codebase uses.
    def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
        assert isinstance(input, torch.Tensor)
        if posinf is None:
            posinf = torch.finfo(input.dtype).max
        if neginf is None:
            neginf = torch.finfo(input.dtype).min
        assert nan == 0
        # nansum over a singleton dim maps NaN -> 0; clamp maps +/-Inf to
        # the requested limits.
        return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
#----------------------------------------------------------------------------
# Symbolic assert.
# The attribute name changed between PyTorch releases; prefer the newer one.
try:
    symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
except AttributeError:
    symbolic_assert = torch.Assert # 1.7.0
#----------------------------------------------------------------------------
# Context manager to suppress known warnings in torch.jit.trace().
class suppress_tracer_warnings(warnings.catch_warnings):
    # Context manager that silences torch.jit.TracerWarning within its scope.
    # Used around size conversions in assert_shape() that intentionally
    # register traced values as constants.
    def __enter__(self):
        super().__enter__()
        warnings.simplefilter('ignore', category=torch.jit.TracerWarning)
        return self
#----------------------------------------------------------------------------
# Assert that the shape of a tensor matches the given list of integers.
# None indicates that the size of a dimension is allowed to vary.
# Performs symbolic assertion when used in torch.jit.trace().
def assert_shape(tensor, ref_shape):
    """Assert that `tensor` matches `ref_shape` (a list of sizes).

    A `None` entry lets the corresponding dimension vary freely. When a
    size is a traced tensor (inside torch.jit.trace()), a symbolic assert
    is emitted instead of an eager check.
    """
    if tensor.ndim != len(ref_shape):
        raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
    for dim_idx, (actual, expected) in enumerate(zip(tensor.shape, ref_shape)):
        if expected is None:
            continue # free dimension
        if isinstance(expected, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(torch.as_tensor(actual), expected), f'Wrong size for dimension {dim_idx}')
        elif isinstance(actual, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(actual, torch.as_tensor(expected)), f'Wrong size for dimension {dim_idx}: expected {expected}')
        elif actual != expected:
            raise AssertionError(f'Wrong size for dimension {dim_idx}: got {actual}, expected {expected}')
#----------------------------------------------------------------------------
# Function decorator that calls torch.autograd.profiler.record_function().
def profiled_function(fn):
    """Decorator that wraps `fn` in torch.autograd.profiler.record_function()
    so its calls show up as a named range in profiler traces.
    """
    def wrapper(*args, **kwargs):
        with torch.autograd.profiler.record_function(fn.__name__):
            return fn(*args, **kwargs)
    wrapper.__name__ = fn.__name__
    return wrapper
#----------------------------------------------------------------------------
# Sampler for torch.utils.data.DataLoader that loops over the dataset
# indefinitely, shuffling items as it goes.
class InfiniteSampler(torch.utils.data.Sampler):
    """Sampler that loops over the dataset indefinitely, re-shuffling items
    within a sliding window as it goes, and yields only the indices that
    belong to this replica (`rank` out of `num_replicas`).
    """
    def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
        assert len(dataset) > 0
        assert num_replicas > 0
        assert 0 <= rank < num_replicas
        assert 0 <= window_size <= 1
        super().__init__(dataset)
        self.dataset = dataset
        self.rank = rank                 # which replica this sampler serves
        self.num_replicas = num_replicas # total number of replicas
        self.shuffle = shuffle
        self.seed = seed                 # seed for the deterministic RNG
        self.window_size = window_size   # fraction of dataset used as shuffle window

    def __iter__(self):
        order = np.arange(len(self.dataset))
        rnd = None
        window = 0
        if self.shuffle:
            rnd = np.random.RandomState(self.seed)
            rnd.shuffle(order)
            window = int(np.rint(order.size * self.window_size))

        idx = 0
        while True:
            i = idx % order.size
            # Round-robin partition of the global index stream across replicas.
            if idx % self.num_replicas == self.rank:
                yield order[i]
            # Keep the ordering fresh by swapping the current item with a
            # random earlier one inside the window.
            if window >= 2:
                j = (i - rnd.randint(window)) % order.size
                order[i], order[j] = order[j], order[i]
            idx += 1
#----------------------------------------------------------------------------
# Utilities for operating with torch.nn.Module parameters and buffers.
def params_and_buffers(module):
    """Return all parameters followed by all buffers of `module` as one list."""
    assert isinstance(module, torch.nn.Module)
    return [*module.parameters(), *module.buffers()]
def named_params_and_buffers(module):
    """Return (name, tensor) pairs for all parameters followed by all buffers."""
    assert isinstance(module, torch.nn.Module)
    return [*module.named_parameters(), *module.named_buffers()]
def copy_params_and_buffers(src_module, dst_module, require_all=False):
    """Copy every parameter/buffer of `src_module` into the same-named
    tensor of `dst_module`, preserving each destination tensor's
    requires_grad flag.

    With `require_all=True`, every destination tensor must have a source
    counterpart. Returns the list of names that were copied.
    """
    assert isinstance(src_module, torch.nn.Module)
    assert isinstance(dst_module, torch.nn.Module)
    source = dict(list(src_module.named_parameters()) + list(src_module.named_buffers()))
    copied = []
    for name, dst_tensor in list(dst_module.named_parameters()) + list(dst_module.named_buffers()):
        assert (name in source) or (not require_all)
        if name in source:
            dst_tensor.copy_(source[name].detach()).requires_grad_(dst_tensor.requires_grad)
            copied.append(name)
    return copied
#----------------------------------------------------------------------------
# Context manager for easily enabling/disabling DistributedDataParallel
# synchronization.
@contextlib.contextmanager
def ddp_sync(module, sync):
    """Context manager that enables/disables DistributedDataParallel gradient
    synchronization for the enclosed forward/backward pass.

    For non-DDP modules, or when `sync` is truthy, this is a no-op.
    """
    assert isinstance(module, torch.nn.Module)
    if not sync and isinstance(module, torch.nn.parallel.DistributedDataParallel):
        with module.no_sync():
            yield
    else:
        yield
#----------------------------------------------------------------------------
# Check DistributedDataParallel consistency across processes.
def check_ddp_consistency(module, ignore_regex=None):
    # Verify that every parameter/buffer of `module` holds the same values
    # on all ranks, by broadcasting rank 0's copy and comparing elementwise.
    # `ignore_regex` (full-match against "ClassName.tensor_name") skips
    # tensors that are allowed to diverge.
    # NOTE(review): requires an initialized torch.distributed process group.
    assert isinstance(module, torch.nn.Module)
    for name, tensor in named_params_and_buffers(module):
        fullname = type(module).__name__ + '.' + name
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        tensor = tensor.detach()
        other = tensor.clone()
        torch.distributed.broadcast(tensor=other, src=0)
        # nan_to_num maps NaN -> 0 on both sides so NaN positions compare equal.
        assert (nan_to_num(tensor) == nan_to_num(other)).all(), fullname
#----------------------------------------------------------------------------
# Print summary table of module hierarchy.
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    """Run `module` on `inputs`, print a per-submodule summary table of
    parameter/buffer counts and output shapes/dtypes, and return the
    module's outputs.

    Args:
        module:         Module to summarize; must not be a ScriptModule.
        inputs:         Positional arguments passed to `module(...)`.
        max_nesting:    Maximum submodule depth included in the table.
        skip_redundant: Omit rows that contribute no new parameters,
                        buffers, or outputs.

    Returns:
        Whatever `module(*inputs)` returns.
    """
    assert isinstance(module, torch.nn.Module)
    assert not isinstance(module, torch.jit.ScriptModule)
    assert isinstance(inputs, (tuple, list))

    # Register hooks: record each submodule's tensor outputs, tracking the
    # current nesting depth via the pre/post hook pair.
    entries = []
    nesting = [0]
    def pre_hook(_mod, _inputs):
        nesting[0] += 1
    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if nesting[0] <= max_nesting:
            outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]

    # Run module.
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()

    # Identify unique outputs, parameters, and buffers.
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
        e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
        e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
        tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}

    # Filter out redundant entries.
    if skip_redundant:
        entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]

    # Construct table.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [['---'] * len(rows[0])]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for name, mod in module.named_modules()}
    for e in entries:
        name = '<top-level>' if e.mod is module else submodule_names[e.mod]
        param_size = sum(t.numel() for t in e.unique_params)
        buffer_size = sum(t.numel() for t in e.unique_buffers)
        # Bug fix: previously this indexed e.outputs[0] for every element,
        # so multi-output modules reported the first output's shape on all
        # rows (dtypes below correctly use `t`).
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
        rows += [[
            name + (':0' if len(e.outputs) >= 2 else ''),
            str(param_size) if param_size else '-',
            str(buffer_size) if buffer_size else '-',
            (output_shapes + ['-'])[0],
            (output_dtypes + ['-'])[0],
        ]]
        for idx in range(1, len(e.outputs)):
            rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [['---'] * len(rows[0])]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]

    # Print table.
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    print()
    for row in rows:
        print('  '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
    print()
    return outputs
#----------------------------------------------------------------------------
| 11,073 | 40.631579 | 133 | py |
DFMGAN | DFMGAN-main/torch_utils/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# empty
| 436 | 42.7 | 76 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/bias_act.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient bias and activation."""
import os
import warnings
import numpy as np
import torch
import dnnlib
import traceback
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
# Table of activations supported by bias_act(). Per entry:
#   func:         reference (eager-mode) implementation,
#   def_alpha:    default shape parameter (e.g. leaky-ReLU negative slope),
#   def_gain:     default output scaling factor,
#   cuda_idx:     activation index understood by the CUDA kernel,
#   ref:          which saved tensor(s) the gradient needs ('x' and/or 'y'),
#   has_2nd_grad: whether a second-order gradient is implemented.
activation_funcs = {
    'linear':   dnnlib.EasyDict(func=lambda x, **_:         x,                                          def_alpha=0,    def_gain=1,             cuda_idx=1, ref='',  has_2nd_grad=False),
    'relu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.relu(x),                def_alpha=0,    def_gain=np.sqrt(2),    cuda_idx=2, ref='y', has_2nd_grad=False),
    'lrelu':    dnnlib.EasyDict(func=lambda x, alpha, **_:  torch.nn.functional.leaky_relu(x, alpha),   def_alpha=0.2,  def_gain=np.sqrt(2),    cuda_idx=3, ref='y', has_2nd_grad=False),
    'tanh':     dnnlib.EasyDict(func=lambda x, **_:         torch.tanh(x),                              def_alpha=0,    def_gain=1,             cuda_idx=4, ref='y', has_2nd_grad=True),
    'sigmoid':  dnnlib.EasyDict(func=lambda x, **_:         torch.sigmoid(x),                           def_alpha=0,    def_gain=1,             cuda_idx=5, ref='y', has_2nd_grad=True),
    'elu':      dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.elu(x),                 def_alpha=0,    def_gain=1,             cuda_idx=6, ref='y', has_2nd_grad=True),
    'selu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.selu(x),                def_alpha=0,    def_gain=1,             cuda_idx=7, ref='y', has_2nd_grad=True),
    'softplus': dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.softplus(x),            def_alpha=0,    def_gain=1,             cuda_idx=8, ref='y', has_2nd_grad=True),
    'swish':    dnnlib.EasyDict(func=lambda x, **_:         torch.sigmoid(x) * x,                       def_alpha=0,    def_gain=np.sqrt(2),    cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
_inited = False                  # True once a plugin build has been attempted.
_plugin = None                   # Compiled CUDA extension, or None if unavailable.
_null_tensor = torch.empty([0])  # Sentinel passed to the kernel for "no tensor".

def _init():
    # Lazily build and load the CUDA plugin on first use. Returns True when
    # the plugin is available; on build failure a warning is emitted and
    # callers fall back to the slow reference implementation.
    global _inited, _plugin
    if not _inited:
        _inited = True
        sources = ['bias_act.cpp', 'bias_act.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        except:
            warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias and activation function.

    Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
    and scales the result by `gain`. Each of the steps is optional. In most cases,
    the fused op is considerably more efficient than performing the same calculation
    using standard PyTorch ops. It supports first and second order gradients,
    but not third order gradients.

    Args:
        x:      Input activation tensor. Can be of any shape.
        b:      Bias vector, or `None` to disable. Must be a 1D tensor of the same type
                as `x`. The shape must be known, and it must match the dimension of `x`
                corresponding to `dim`.
        dim:    The dimension in `x` corresponding to the elements of `b`.
                The value of `dim` is ignored if `b` is not specified.
        act:    Name of the activation function to evaluate, or `"linear"` to disable.
                Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
                See `activation_funcs` for a full list. `None` is not allowed.
        alpha:  Shape parameter for the activation function, or `None` to use the default.
        gain:   Scaling factor for the output tensor, or `None` to use default.
                See `activation_funcs` for the default scaling of each activation function.
                If unsure, consider specifying 1.
        clamp:  Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
                the clamping (default).
        impl:   Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    # Use the fused CUDA kernel only when requested, the tensor lives on the
    # GPU, and the plugin built successfully; otherwise fall back to the
    # eager reference implementation.
    if impl == 'cuda' and x.device.type == 'cuda' and _init():
        return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Slow reference implementation of `bias_act()` using standard PyTorch ops."""
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Add bias, broadcasting it along dimension `dim`.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        bcast_shape = [-1 if axis == dim else 1 for axis in range(x.ndim)]
        x = x + b.reshape(bcast_shape)

    # Activation, then optional gain scaling and clamping.
    x = spec.func(x, alpha=alpha)
    if gain != 1:
        x = x * gain
    if clamp >= 0:
        x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type
    return x
#----------------------------------------------------------------------------
# Cache of generated autograd Function classes, keyed by op parameters.
_bias_act_cuda_cache = dict()

def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.

    Returns a torch.autograd.Function subclass specialized for the given
    (dim, act, alpha, gain, clamp); call `.apply(x, b)` on it.
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]

    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b): # pylint: disable=arguments-differ
            # Preserve channels-last layout when the input uses it.
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            # Skip the kernel entirely when the whole op is an identity.
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            # Save only the tensors the backward pass actually references.
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y

        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)

            # Bias gradient: reduce over every dimension except `dim`.
            if ctx.needs_input_grad[1]:
                db = dx.sum([i for i in range(dx.ndim) if i != dim])

            return dx, db

    # Backward op (expressed as its own Function to support 2nd-order grads).
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
            # Kernel mode 1: first-order gradient of the activation.
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx

        @staticmethod
        def backward(ctx, d_dx): # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None

            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)

            # Kernel mode 2: second-order gradient w.r.t. the input.
            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)

            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])

            return d_dy, d_x, d_b, d_y

    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
#----------------------------------------------------------------------------
| 10,047 | 46.173709 | 185 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/grid_sample_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.grid_sample` that
supports arbitrarily high order gradients between the input and output.
Only works on 2D images and assumes
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
import warnings
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
#----------------------------------------------------------------------------
def grid_sample(input, grid):
    """Bilinear 2D grid sampling (zero padding, `align_corners=False`).

    Routes through the custom double-differentiable op when it is enabled
    and supported; otherwise defers to the stock PyTorch implementation.
    """
    if not _should_use_custom_op():
        return torch.nn.functional.grid_sample(
            input=input, grid=grid, mode='bilinear',
            padding_mode='zeros', align_corners=False)
    return _GridSample2dForward.apply(input, grid)
#----------------------------------------------------------------------------
def _should_use_custom_op():
    """Decide whether the custom grid-sample op should be used.

    Requires the module-level `enabled` flag and a PyTorch version the
    custom op is known to work on; warns once per call otherwise.
    """
    if not enabled:
        return False
    supported_prefixes = ('1.7.', '1.8.', '1.9')
    if any(torch.__version__.startswith(v) for v in supported_prefixes):
        return True
    warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
    return False
#----------------------------------------------------------------------------
class _GridSample2dForward(torch.autograd.Function):
@staticmethod
def forward(ctx, input, grid):
assert input.ndim == 4
assert grid.ndim == 4
output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
ctx.save_for_backward(input, grid)
return output
@staticmethod
def backward(ctx, grad_output):
input, grid = ctx.saved_tensors
grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
return grad_input, grad_grid
#----------------------------------------------------------------------------
class _GridSample2dBackward(torch.autograd.Function):
    """Backward half of the custom grid-sample op.

    Wrapping the backward pass in its own autograd Function is what makes
    second-order gradients possible.
    """
    @staticmethod
    def forward(ctx, grad_output, input, grid):
        # NOTE(review): private ATen op; assumed stable only on the PyTorch
        # versions admitted by _should_use_custom_op() (1.7-1.9) — confirm
        # the signature before enabling on newer releases.
        op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
        # 0, 0, False presumably map to bilinear / zeros / align_corners=False
        # (matching grid_sample() above) — TODO confirm against the ATen schema.
        grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
        ctx.save_for_backward(grid)
        return grad_input, grad_grid

    @staticmethod
    def backward(ctx, grad2_grad_input, grad2_grad_grid):
        _ = grad2_grad_grid # unused
        grid, = ctx.saved_tensors
        grad2_grad_output = None
        grad2_input = None
        grad2_grid = None

        if ctx.needs_input_grad[0]:
            # d(grad_input)/d(grad_output) is the forward sampling op itself.
            grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)

        # Second derivative w.r.t. the grid is not implemented.
        assert not ctx.needs_input_grad[2]
        return grad2_grad_output, grad2_input, grad2_grid
#----------------------------------------------------------------------------
| 3,299 | 38.285714 | 138 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/conv2d_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.conv2d` that supports
arbitrarily high order gradients with zero performance penalty."""
import warnings
import contextlib
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.
@contextlib.contextmanager
def no_weight_gradients():
    """Context manager that disables weight-gradient computation in the
    custom conv ops for the duration of the `with` block.

    Fix: the previous flag value is now restored in a `finally` clause, so
    an exception raised inside the `with` body can no longer leave weight
    gradients permanently disabled.
    """
    global weight_gradients_disabled
    old = weight_gradients_disabled
    weight_gradients_disabled = True
    try:
        yield
    finally:
        weight_gradients_disabled = old
#----------------------------------------------------------------------------
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Drop-in replacement for `torch.nn.functional.conv2d` supporting
    arbitrarily high order gradients when the custom op is enabled."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    op = _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups)
    return op.apply(input, weight, bias)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """Drop-in replacement for `torch.nn.functional.conv_transpose2d`
    supporting arbitrarily high order gradients when the custom op is enabled."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    op = _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    return op.apply(input, weight, bias)
#----------------------------------------------------------------------------
def _should_use_custom_op(input):
    """Decide whether the custom conv op should handle `input`.

    Requires the module-level `enabled` flag, cuDNN, a CUDA tensor, and a
    PyTorch version the custom op is known to work on.
    """
    assert isinstance(input, torch.Tensor)
    if not enabled or not torch.backends.cudnn.enabled:
        return False
    if input.device.type != 'cuda':
        return False
    supported_prefixes = ('1.7.', '1.8.', '1.9')
    if any(torch.__version__.startswith(v) for v in supported_prefixes):
        return True
    warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
    return False
def _tuple_of_ints(xs, ndim):
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
assert len(xs) == ndim
assert all(isinstance(x, int) for x in xs)
return xs
#----------------------------------------------------------------------------
_conv2d_gradfix_cache = dict()
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Build (and cache in `_conv2d_gradfix_cache`) a `torch.autograd.Function`
    implementing conv2d / conv_transpose2d for one fixed hyperparameter
    configuration.

    Caching per configuration lets the returned class close over the parsed
    arguments, so forward/backward need no per-call argument parsing.
    """
    # Parse arguments.
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = _tuple_of_ints(stride, ndim)
    padding = _tuple_of_ints(padding, ndim)
    output_padding = _tuple_of_ints(output_padding, ndim)
    dilation = _tuple_of_ints(dilation, ndim)

    # Lookup from cache.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if key in _conv2d_gradfix_cache:
        return _conv2d_gradfix_cache[key]

    # Validate arguments.
    assert groups >= 1
    assert len(weight_shape) == ndim + 2
    assert all(stride[i] >= 1 for i in range(ndim))
    assert all(padding[i] >= 0 for i in range(ndim))
    assert all(dilation[i] >= 0 for i in range(ndim))
    if not transpose:
        assert all(output_padding[i] == 0 for i in range(ndim))
    else: # transpose
        assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))

    # Helpers.
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        # Output padding needed so the gradient conv (which flips `transpose`)
        # reproduces exactly `input_shape` from `output_shape`.
        if transpose:
            return [0, 0]
        return [
            input_shape[i + 2]
            - (output_shape[i + 2] - 1) * stride[i]
            - (1 - 2 * padding[i])
            - dilation[i] * (weight_shape[i + 2] - 1)
            for i in range(ndim)
        ]

    # Forward & backward.
    class Conv2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias):
            assert weight.shape == weight_shape
            if not transpose:
                output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else: # transpose
                output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            input, weight = ctx.saved_tensors
            grad_input = None
            grad_weight = None
            grad_bias = None

            if ctx.needs_input_grad[0]:
                # Gradient w.r.t. the input is the opposite conv with the same weights.
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
                assert grad_input.shape == input.shape

            # `weight_gradients_disabled` is toggled by no_weight_gradients().
            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
                assert grad_weight.shape == weight_shape

            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum([0, 2, 3])

            return grad_input, grad_weight, grad_bias

    # Gradient with respect to the weights.
    class Conv2dGradWeight(torch.autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input):
            # NOTE(review): private cuDNN ops; assumed available only on the
            # PyTorch versions admitted by _should_use_custom_op() (1.7-1.9).
            op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
            assert grad_weight.shape == weight_shape
            ctx.save_for_backward(grad_output, input)
            return grad_weight

        @staticmethod
        def backward(ctx, grad2_grad_weight):
            grad_output, input = ctx.saved_tensors
            grad2_grad_output = None
            grad2_input = None

            if ctx.needs_input_grad[0]:
                grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
                assert grad2_grad_output.shape == grad_output.shape

            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
                assert grad2_input.shape == input.shape

            return grad2_grad_output, grad2_input

    # Add to cache.
    _conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
#----------------------------------------------------------------------------
| 7,677 | 43.900585 | 197 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/upfirdn2d.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient resampling of 2D images."""
import os
import warnings
import numpy as np
import torch
import traceback
from .. import custom_ops
from .. import misc
from . import conv2d_gradfix
#----------------------------------------------------------------------------
_inited = False
_plugin = None
def _init():
    """Lazily build the upfirdn2d CUDA extension.

    Returns True when the compiled plugin is available; on build failure a
    warning is emitted and callers fall back to the reference implementation.

    Fixes: `_inited` is now actually set, so a failed build is not retried
    on every call; the bare `except:` is narrowed to `except Exception:` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    global _inited, _plugin
    if not _inited:
        _inited = True  # attempt the (expensive) build at most once
        sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        except Exception:
            warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _get_filter_size(f):
    """Return `(width, height)` of a FIR filter tensor; `(1, 1)` for `None`
    (identity). Accepts a 1D (separable) or 2D filter."""
    if f is None:
        return 1, 1
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    fw, fh = f.shape[-1], f.shape[0]
    with misc.suppress_tracer_warnings():  # sizes are treated as constants under tracing
        fw, fh = int(fw), int(fh)
    misc.assert_shape(f, [fh, fw][:f.ndim])
    assert fw >= 1 and fh >= 1
    return fw, fh
#----------------------------------------------------------------------------
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
    r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.

    Args:
        f:           Torch tensor, numpy array, or python list of the shape
                     `[filter_height, filter_width]` (non-separable),
                     `[filter_taps]` (separable),
                     `[]` (impulse), or
                     `None` (identity).
        device:      Result device (default: cpu).
        normalize:   Normalize the filter so that it retains the magnitude
                     for constant input signal (DC)? (default: True).
        flip_filter: Flip the filter? (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        separable:   Return a separable filter? (default: select automatically).

    Returns:
        Float32 tensor of the shape
        `[filter_height, filter_width]` (non-separable) or
        `[filter_taps]` (separable).
    """
    # Validate.
    if f is None:
        f = 1
    f = torch.as_tensor(f, dtype=torch.float32)
    assert f.ndim in [0, 1, 2]
    assert f.numel() > 0
    if f.ndim == 0:
        f = f[np.newaxis]

    # Separable? Auto-select: keep long 1D filters separable, densify short ones.
    if separable is None:
        separable = (f.ndim == 1 and f.numel() >= 8)
    if f.ndim == 1 and not separable:
        f = f.ger(f)  # outer product -> 2D filter
    assert f.ndim == (1 if separable else 2)

    # Apply normalize, flip, gain, and device.
    if normalize:
        # Fix: out-of-place division. The previous `f /= f.sum()` mutated the
        # caller's tensor in place whenever a float32 tensor was passed,
        # because torch.as_tensor() returns the same object in that case.
        f = f / f.sum()
    if flip_filter:
        f = f.flip(list(range(f.ndim)))
    f = f * (gain ** (f.ndim / 2))  # split gain across both passes of a separable filter
    f = f.to(device=device)
    return f
#----------------------------------------------------------------------------
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Pad, upsample, filter, and downsample a batch of 2D images.

    For each channel: (1) upsample by inserting zeros (`up`), (2) pad with
    zeros (`padding`; negative values crop), (3) convolve with the FIR
    filter `f`, (4) downsample by keeping every Nth pixel (`down`). This
    mirrors scipy.signal.upfirdn() but runs as one fused op and supports
    gradients of arbitrary order.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter: `[fh, fw]`, `[taps]` (separable),
                     or `None` (identity).
        up:          Integer upsampling factor, int or `[x, y]` (default: 1).
        down:        Integer downsampling factor, int or `[x, y]` (default: 1).
        padding:     Int, `[x, y]`, or `[x0, x1, y0, y1]` w.r.t. the upsampled
                     image (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    use_cuda = impl == 'cuda' and x.device.type == 'cuda' and _init()
    if use_cuda:
        return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
    return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
#----------------------------------------------------------------------------
@misc.profiled_function
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)  # identity filter
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert f.dtype == torch.float32 and not f.requires_grad
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)

    # Upsample by inserting zeros after each pixel.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])

    # Pad (positive values) or crop (negative values).
    x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]

    # Setup filter. gain ** (ndim/2) applies the full gain once for a 2D
    # filter and splits it across the two passes of a separable one.
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))  # conv2d performs correlation; flip for true convolution

    # Convolve with the filter (depthwise, via groups=num_channels).
    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
    else:
        # Separable filter: apply as two 1D convolutions.
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)

    # Downsample by throwing away pixels.
    x = x[:, :, ::downy, ::downx]
    return x
#----------------------------------------------------------------------------
_upfirdn2d_cuda_cache = dict()
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Fast CUDA implementation of `upfirdn2d()` using custom ops.

    Builds (and memoizes in `_upfirdn2d_cuda_cache`) a `torch.autograd.Function`
    subclass specialized for one resampling configuration.
    """
    # Parse arguments.
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)

    # Lookup from cache.
    key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
    if key in _upfirdn2d_cuda_cache:
        return _upfirdn2d_cuda_cache[key]

    # Forward op.
    class Upfirdn2dCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, f): # pylint: disable=arguments-differ
            assert isinstance(x, torch.Tensor) and x.ndim == 4
            if f is None:
                f = torch.ones([1, 1], dtype=torch.float32, device=x.device)  # identity filter
            assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
            y = x
            if f.ndim == 2:
                y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
            else:
                # Separable filter: two 1D passes, splitting the gain between them.
                y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
                y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
            ctx.save_for_backward(f)
            ctx.x_shape = x.shape
            return y

        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            f, = ctx.saved_tensors
            _, _, ih, iw = ctx.x_shape
            _, _, oh, ow = dy.shape
            fw, fh = _get_filter_size(f)
            # Padding that makes the swapped (up<->down) op recover the input shape.
            p = [
                fw - padx0 - 1,
                iw * upx - ow * downx + padx0 - upx + 1,
                fh - pady0 - 1,
                ih * upy - oh * downy + pady0 - upy + 1,
            ]
            dx = None
            df = None

            if ctx.needs_input_grad[0]:
                dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)

            # Gradients w.r.t. the filter are not supported.
            assert not ctx.needs_input_grad[1]
            return dx, df

    # Add to cache.
    _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
    return Upfirdn2dCuda
#----------------------------------------------------------------------------
def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Filter a batch of 2D images with the given FIR filter.

    The output is padded so that its shape matches the input; `padding` is
    applied on top of that (negative values crop). Pixels outside the image
    are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter: `[fh, fw]`, `[taps]` (separable),
                     or `None` (identity).
        padding:     Int, `[x, y]`, or `[x0, x1, y0, y1]` padding w.r.t. the
                     output (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint so the output lines up with the input.
    pad = [
        px0 + fw // 2,
        px1 + (fw - 1) // 2,
        py0 + fh // 2,
        py1 + (fh - 1) // 2,
    ]
    return upfirdn2d(x, f, padding=pad, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Upsample a batch of 2D images with the given FIR filter.

    The output is padded so that its shape is a multiple of the input shape;
    `padding` is applied on top of that (negative values crop). Pixels
    outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter: `[fh, fw]`, `[taps]` (separable),
                     or `None` (identity).
        up:          Integer upsampling factor, int or `[x, y]` (default: 2).
        padding:     Int, `[x, y]`, or `[x0, x1, y0, y1]` padding w.r.t. the
                     output (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    upx, upy = _parse_scaling(up)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint on the upsampled grid.
    pad = [
        px0 + (fw + upx - 1) // 2,
        px1 + (fw - upx) // 2,
        py0 + (fh + upy - 1) // 2,
        py1 + (fh - upy) // 2,
    ]
    # Scale gain by upx*upy to preserve magnitude after zero-insertion.
    return upfirdn2d(x, f, up=up, padding=pad, flip_filter=flip_filter, gain=gain * upx * upy, impl=impl)
#----------------------------------------------------------------------------
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Downsample a batch of 2D images with the given FIR filter.

    The output is padded so that its shape is a fraction of the input shape;
    `padding` is applied on top of that (negative values crop). Pixels
    outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/32/64).
        f:           Float32 FIR filter: `[fh, fw]`, `[taps]` (separable),
                     or `None` (identity).
        down:        Integer downsampling factor, int or `[x, y]` (default: 2).
        padding:     Int, `[x, y]`, or `[x0, x1, y0, y1]` padding w.r.t. the
                     input (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    downx, downy = _parse_scaling(down)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint relative to the retained pixels.
    pad = [
        px0 + (fw - downx + 1) // 2,
        px1 + (fw - downx) // 2,
        py0 + (fh - downy + 1) // 2,
        py1 + (fh - downy) // 2,
    ]
    return upfirdn2d(x, f, down=down, padding=pad, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
| 16,287 | 41.306494 | 157 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/conv2d_resample.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""2D convolution with optional up/downsampling."""
import torch
from .. import misc
from . import conv2d_gradfix
from . import upfirdn2d
from .upfirdn2d import _parse_padding
from .upfirdn2d import _get_filter_size
#----------------------------------------------------------------------------
def _get_weight_shape(w):
    """Return `w.shape` as a plain list of Python ints (trace-safe)."""
    with misc.suppress_tracer_warnings(): # the sizes are treated as constants for tracing purposes
        dims = [int(d) for d in w.shape]
    misc.assert_shape(w, dims)
    return dims
#----------------------------------------------------------------------------
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
    """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
    """
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)

    # Flip weight if requested.
    if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
        w = w.flip([2, 3])

    # Workaround performance pitfall in cuDNN 8.0.5, triggered when using
    # 1x1 kernel + memory_format=channels_last + less than 64 channels.
    if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:
        if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
            if out_channels <= 4 and groups == 1:
                # Very few output channels: express the 1x1 conv as a plain matmul.
                in_shape = x.shape
                x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])
                x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
            else:
                # Run the conv in contiguous format, then restore channels_last.
                x = x.to(memory_format=torch.contiguous_format)
                w = w.to(memory_format=torch.contiguous_format)
                x = conv2d_gradfix.conv2d(x, w, groups=groups)
            return x.to(memory_format=torch.channels_last)

    # Otherwise => execute using conv2d_gradfix.
    op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
    return op(x, w, stride=stride, padding=padding, groups=groups)
#----------------------------------------------------------------------------
@misc.profiled_function
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
    r"""2D convolution with optional up/downsampling.

    Padding is performed only once at the beginning, not between the operations.

    Args:
        x:              Input tensor of shape
                        `[batch_size, in_channels, in_height, in_width]`.
        w:              Weight tensor of shape
                        `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f:              Low-pass filter for up/downsampling. Must be prepared beforehand by
                        calling upfirdn2d.setup_filter(). None = identity (default).
        up:             Integer upsampling factor (default: 1).
        down:           Integer downsampling factor (default: 1).
        padding:        Padding with respect to the upsampled image. Can be a single number
                        or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
                        (default: 0).
        groups:         Split input channels into N groups (default: 1).
        flip_weight:    False = convolution, True = correlation (default: True).
        flip_filter:    False = convolution, True = correlation (default: False).

    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    assert isinstance(groups, int) and (groups >= 1)
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)
    px0, px1, py0, py1 = _parse_padding(padding)

    # Adjust padding to account for up/downsampling (centers the filter footprint).
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2

    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x

    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
        return x

    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
        return x

    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        # Rearrange the weight into conv_transpose2d layout
        # [in_channels, out_channels//groups, kh, kw].
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
        # Shift padding into the transpose conv where possible; the remainder
        # is applied by the subsequent upfirdn2d.
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
        if down > 1:
            x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x

    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)

    # Fallback: Generic reference implementation.
    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
#----------------------------------------------------------------------------
| 7,591 | 47.356688 | 130 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/fma.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
import torch
#----------------------------------------------------------------------------
def fma(a, b, c): # => a * b + c
    """Fused multiply-add `a * b + c` via the custom autograd op below,
    whose hand-written backward is slightly cheaper than `torch.addcmul()`'s."""
    return _FusedMultiplyAdd.apply(a, b, c)
#----------------------------------------------------------------------------
class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
@staticmethod
def forward(ctx, a, b, c): # pylint: disable=arguments-differ
out = torch.addcmul(c, a, b)
ctx.save_for_backward(a, b)
ctx.c_shape = c.shape
return out
@staticmethod
def backward(ctx, dout): # pylint: disable=arguments-differ
a, b = ctx.saved_tensors
c_shape = ctx.c_shape
da = None
db = None
dc = None
if ctx.needs_input_grad[0]:
da = _unbroadcast(dout * b, a.shape)
if ctx.needs_input_grad[1]:
db = _unbroadcast(dout * a, b.shape)
if ctx.needs_input_grad[2]:
dc = _unbroadcast(dout, c_shape)
return da, db, dc
#----------------------------------------------------------------------------
def _unbroadcast(x, shape):
extra_dims = x.ndim - len(shape)
assert extra_dims >= 0
dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape(-1, *x.shape[extra_dims+1:])
assert x.shape == shape
return x
#----------------------------------------------------------------------------
| 2,034 | 32.360656 | 105 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# empty
| 436 | 42.7 | 76 | py |
DFMGAN | DFMGAN-main/metrics/metric_utils.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import hashlib
import pickle
import copy
import uuid
import numpy as np
import torch
import dnnlib
#----------------------------------------------------------------------------
class MetricOptions:
    """Bundle of everything a metric computation needs.

    Holds the generator and its call kwargs, constructor kwargs for the real
    dataset(s) (`dataset2_kwargs` feeds the *_between_dir metrics via
    `num_data=2`), distributed info (num_gpus/rank/device), a progress monitor,
    and whether on-disk caching of real-dataset feature stats is enabled.
    """
    def __init__(self, G=None, G_kwargs={}, dataset_kwargs={}, dataset2_kwargs={}, num_gpus=1, rank=0, device=None, progress=None, cache=True):
        assert 0 <= rank < num_gpus
        self.G = G
        self.G_kwargs = dnnlib.EasyDict(G_kwargs)
        self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs)
        self.dataset2_kwargs = dnnlib.EasyDict(dataset2_kwargs)
        self.num_gpus = num_gpus
        self.rank = rank
        # Default device is the CUDA device matching this process's rank.
        self.device = device if device is not None else torch.device('cuda', rank)
        # Only rank 0 reports progress; all other ranks get a silent monitor.
        self.progress = progress.sub() if progress is not None and rank == 0 else ProgressMonitor()
        self.cache = cache
#----------------------------------------------------------------------------
# Module-level cache of loaded TorchScript feature detectors, keyed by (url, device).
_feature_detector_cache = dict()
def get_feature_detector_name(url):
    """Short name of a detector: the final URL path component minus its extension."""
    basename = url.rsplit('/', 1)[-1]
    stem, _ext = os.path.splitext(basename)
    return stem
def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
    """Load (and memoize) a TorchScript feature detector from `url` onto `device`.

    With multiple GPUs, rank 0 downloads/loads first while the other ranks wait
    at a barrier, then the followers load (typically hitting the download cache),
    so each (url, device) pair is materialized once per process.
    """
    assert 0 <= rank < num_gpus
    key = (url, device)  # one cached copy per (url, device) pair
    if key not in _feature_detector_cache:
        is_leader = (rank == 0)
        if not is_leader and num_gpus > 1:
            torch.distributed.barrier() # leader goes first
        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
            _feature_detector_cache[key] = torch.jit.load(f).eval().to(device)
        if is_leader and num_gpus > 1:
            torch.distributed.barrier() # others follow
    return _feature_detector_cache[key]
#----------------------------------------------------------------------------
class FeatureStats:
    """Accumulates detector feature vectors across batches (and GPUs).

    Depending on the capture flags, stores the raw features (`capture_all`,
    used by KID/IS/PR) and/or running first/second raw moments
    (`capture_mean_cov`, used by FID). The feature dimensionality is fixed
    lazily by the first `append()`.
    """
    def __init__(self, capture_all=False, capture_mean_cov=False, max_items=None):
        self.capture_all = capture_all
        self.capture_mean_cov = capture_mean_cov
        self.max_items = max_items        # stop accumulating beyond this many rows (None = unbounded)
        self.num_items = 0
        self.num_features = None          # set on first append()
        self.all_features = None
        self.raw_mean = None
        self.raw_cov = None

    def set_num_features(self, num_features):
        """Allocate accumulators on first use; subsequent appends must match."""
        if self.num_features is not None:
            assert num_features == self.num_features
        else:
            self.num_features = num_features
            self.all_features = []
            self.raw_mean = np.zeros([num_features], dtype=np.float64)
            self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64)

    def is_full(self):
        """True once `max_items` rows have been accumulated."""
        return (self.max_items is not None) and (self.num_items >= self.max_items)

    def append(self, x):
        """Append a [N, num_features] array, truncating so max_items is not exceeded."""
        x = np.asarray(x, dtype=np.float32)
        assert x.ndim == 2
        if (self.max_items is not None) and (self.num_items + x.shape[0] > self.max_items):
            if self.num_items >= self.max_items:
                return
            x = x[:self.max_items - self.num_items]
        self.set_num_features(x.shape[1])
        self.num_items += x.shape[0]
        if self.capture_all:
            self.all_features.append(x)
        if self.capture_mean_cov:
            # Accumulate raw moments in float64; mean/cov are derived in get_mean_cov().
            x64 = x.astype(np.float64)
            self.raw_mean += x64.sum(axis=0)
            self.raw_cov += x64.T @ x64

    def append_torch(self, x, num_gpus=1, rank=0):
        """Append a [N, num_features] tensor, gathering and interleaving across GPUs.

        NOTE(review): with num_gpus > 1 every rank must pass a tensor of identical
        shape here, since torch.distributed.broadcast needs matching buffers — confirm.
        """
        assert isinstance(x, torch.Tensor) and x.ndim == 2
        assert 0 <= rank < num_gpus
        if num_gpus > 1:
            ys = []
            for src in range(num_gpus):
                y = x.clone()
                torch.distributed.broadcast(y, src=src)
                ys.append(y)
            x = torch.stack(ys, dim=1).flatten(0, 1) # interleave samples
        self.append(x.cpu().numpy())

    def get_all(self):
        """All captured features as one [num_items, num_features] float32 array."""
        assert self.capture_all
        return np.concatenate(self.all_features, axis=0)

    def get_all_torch(self):
        return torch.from_numpy(self.get_all())

    def get_mean_cov(self):
        """Derive (mean, covariance) from the accumulated raw moments."""
        assert self.capture_mean_cov
        mean = self.raw_mean / self.num_items
        cov = self.raw_cov / self.num_items
        cov = cov - np.outer(mean, mean)
        return mean, cov

    def save(self, pkl_file):
        """Pickle the full accumulator state to `pkl_file`."""
        with open(pkl_file, 'wb') as f:
            pickle.dump(self.__dict__, f)

    @staticmethod
    def load(pkl_file):
        """Restore a FeatureStats previously written by save()."""
        with open(pkl_file, 'rb') as f:
            s = dnnlib.EasyDict(pickle.load(f))
        # __dict__.update restores all fields, including capture_mean_cov and moments.
        obj = FeatureStats(capture_all=s.capture_all, max_items=s.max_items)
        obj.__dict__.update(s)
        return obj
#----------------------------------------------------------------------------
class ProgressMonitor:
    """Throttled progress reporter with optional console output and callback.

    `update(cur_items)` only reports every `flush_interval` items (or at
    completion). A `progress_fn(cur, total)` callback, if given, is driven over
    the [pfn_lo, pfn_hi] sub-range of pfn_total, so nested stages via `sub()`
    map onto disjoint slices of one overall progress bar.
    """
    def __init__(self, tag=None, num_items=None, flush_interval=1000, verbose=False, progress_fn=None, pfn_lo=0, pfn_hi=1000, pfn_total=1000):
        now = time.time()
        self.tag = tag
        self.num_items = num_items
        self.verbose = verbose
        self.flush_interval = flush_interval
        self.progress_fn = progress_fn
        self.pfn_lo = pfn_lo
        self.pfn_hi = pfn_hi
        self.pfn_total = pfn_total
        self.start_time = now
        self.batch_time = now
        self.batch_items = 0
        if self.progress_fn is not None:
            self.progress_fn(self.pfn_lo, self.pfn_total)

    def update(self, cur_items):
        """Record progress; report only on flush boundaries or completion."""
        assert self.num_items is None or cur_items <= self.num_items
        reached_flush = cur_items >= self.batch_items + self.flush_interval
        reached_end = self.num_items is not None and cur_items >= self.num_items
        if not (reached_flush or reached_end):
            return
        now = time.time()
        elapsed = now - self.start_time
        per_item = (now - self.batch_time) / max(cur_items - self.batch_items, 1)
        if self.verbose and self.tag is not None:
            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(elapsed):<12s} ms/item {per_item*1e3:.2f}')
        self.batch_time = now
        self.batch_items = cur_items
        if self.progress_fn is not None and self.num_items is not None:
            self.progress_fn(self.pfn_lo + (self.pfn_hi - self.pfn_lo) * (cur_items / self.num_items), self.pfn_total)

    def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1):
        """Create a child monitor covering the [rel_lo, rel_hi] slice of this one's range."""
        span = self.pfn_hi - self.pfn_lo
        return ProgressMonitor(
            tag=tag,
            num_items=num_items,
            flush_interval=flush_interval,
            verbose=self.verbose,
            progress_fn=self.progress_fn,
            pfn_lo=self.pfn_lo + span * rel_lo,
            pfn_hi=self.pfn_lo + span * rel_hi,
            pfn_total=self.pfn_total,
        )
#----------------------------------------------------------------------------
def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, num_data = 1, **stats_kwargs):
    """Run the feature detector over a real dataset and return FeatureStats.

    Args:
        opts:            MetricOptions bundle.
        detector_url:    URL of the TorchScript feature detector.
        detector_kwargs: Extra kwargs forwarded to each detector call.
        rel_lo, rel_hi:  Sub-range of the progress bar assigned to this stage.
        batch_size:      DataLoader batch size.
        data_loader_kwargs: Overrides for DataLoader construction.
        max_items:       Cap on the number of dataset items to process.
        num_data:        1 = use opts.dataset_kwargs, 2 = use opts.dataset2_kwargs.
        **stats_kwargs:  Forwarded to FeatureStats (capture_all / capture_mean_cov).

    Returns:
        A populated FeatureStats, possibly loaded from the on-disk cache.
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) if num_data == 1 else dnnlib.util.construct_class_by_name(**opts.dataset2_kwargs)
    if data_loader_kwargs is None:
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)

    # Try to lookup from cache.
    cache_file = None
    if opts.cache:
        # Choose cache file name: dataset name + detector name + MD5 of all arguments.
        args = dict(dataset_kwargs=opts.dataset_kwargs if num_data == 1 else opts.dataset2_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
        md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
        cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{md5.hexdigest()}'
        cache_file = dnnlib.make_cache_dir_path('gan-metrics', cache_tag + '.pkl')

        # Check if the file exists (all processes must agree).
        flag = os.path.isfile(cache_file) if opts.rank == 0 else False
        if opts.num_gpus > 1:
            flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device)
            torch.distributed.broadcast(tensor=flag, src=0)
            flag = (float(flag.cpu()) != 0)

        # Load.
        if flag:
            return FeatureStats.load(cache_file)

    # Initialize.
    num_items = len(dataset)
    if max_items is not None:
        num_items = min(num_items, max_items)
    stats = FeatureStats(max_items=num_items, **stats_kwargs)
    progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)

    # Main loop: each rank processes an interleaved subset of item indices.
    item_subset = [(i * opts.num_gpus + opts.rank) % num_items for i in range((num_items - 1) // opts.num_gpus + 1)]
    for images, _labels in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1]) # grayscale -> 3 channels
        if images.shape[1] == 4:
            images = images[:, :3, :, :] # 4-channel input: keep only the first 3 channels
        features = detector(images.to(opts.device), **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)

    # Save to cache (write to temp file, then atomically rename).
    if cache_file is not None and opts.rank == 0:
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        temp_file = cache_file + '.' + uuid.uuid4().hex
        stats.save(temp_file)
        os.replace(temp_file, cache_file) # atomic
    return stats
#----------------------------------------------------------------------------
def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, batch_gen=None, jit=False, **stats_kwargs):
    """Sample images from the generator and accumulate detector FeatureStats.

    Generates images in sub-batches of `batch_gen` (stacked up to `batch_size`
    per detector call) until `stats_kwargs['max_items']` features are collected.
    Labels are drawn uniformly from the real dataset's label set.
    """
    if batch_gen is None:
        batch_gen = min(batch_size, 4)
    assert batch_size % batch_gen == 0

    # Setup generator and load labels.
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)

    # Image generation func.
    def run_generator(z, c, defect_z = None):
        img = G(z=z, c=c, defect_z = defect_z, **opts.G_kwargs)
        img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8)  # [-1,1] float -> uint8
        return img

    # JIT.
    if jit:
        z = torch.zeros([batch_gen, G.z_dim], device=opts.device)
        c = torch.zeros([batch_gen, G.c_dim], device=opts.device)
        input_list = [z, c]
        # These transfer modes take a second latent for the defect branch.
        if G.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            defect_z = torch.zeros([batch_gen, G.z_dim], device=opts.device)
            input_list.append(defect_z)
        run_generator = torch.jit.trace(run_generator, input_list, check_trace=False)

    # Initialize.
    stats = FeatureStats(**stats_kwargs)
    assert stats.max_items is not None
    progress = opts.progress.sub(tag='generator features', num_items=stats.max_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)

    # Main loop.
    while not stats.is_full():
        images = []
        for _i in range(batch_size // batch_gen):
            z = torch.randn([batch_gen, G.z_dim], device=opts.device)
            c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_gen)]
            c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device)
            defect_z = None
            if G.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                defect_z = torch.randn([batch_gen, G.z_dim], device=opts.device)
            images.append(run_generator(z, c, defect_z))
        images = torch.cat(images)
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1]) # grayscale -> 3 channels
        features = detector(images, **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    return stats
#----------------------------------------------------------------------------
| 12,605 | 43.076923 | 185 | py |
DFMGAN | DFMGAN-main/metrics/kernel_inception_distance.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Kernel Inception Distance (KID) from the paper "Demystifying MMD
GANs". Matches the original implementation by Binkowski et al. at
https://github.com/mbinkowski/MMD-GAN/blob/master/gan/compute_scores.py"""
import numpy as np
from . import metric_utils
#----------------------------------------------------------------------------
def compute_kid(opts, max_real, num_gen, num_subsets, max_subset_size):
    """Kernel Inception Distance between real data and generator samples.

    Estimates MMD^2 with the cubic polynomial kernel k(x, y) = (x.y/d + 1)^3
    over `num_subsets` random subsets of at most `max_subset_size` features,
    averaging the unbiased per-subset estimates (Binkowski et al.).

    Returns KID as a float on rank 0; NaN on all other ranks.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.

    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all()

    gen_features = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all()

    if opts.rank != 0:
        return float('nan')

    n = real_features.shape[1] # feature dimensionality, used as the kernel normalizer
    m = min(min(real_features.shape[0], gen_features.shape[0]), max_subset_size)
    t = 0
    for _subset_idx in range(num_subsets):
        x = gen_features[np.random.choice(gen_features.shape[0], m, replace=False)]
        y = real_features[np.random.choice(real_features.shape[0], m, replace=False)]
        a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3
        b = (x @ y.T / n + 1) ** 3
        # Unbiased MMD^2 estimate: exclude the diagonal of the within-set kernels.
        t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m
    kid = t / num_subsets / m
    return float(kid)
#----------------------------------------------------------------------------
def compute_kid_between_dir(opts, max_real, num_gen, num_subsets, max_subset_size):
    """KID between the two real datasets (opts.dataset_kwargs vs opts.dataset2_kwargs).

    Near-duplicate of compute_kid: the generator features are replaced by
    features of the second dataset (num_data=2), so `num_gen` is unused here.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.
    detector2_kwargs = dict(return_features=True) # Return raw features before the softmax layer.

    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all()

    real_features_2 = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector2_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real, num_data = 2).get_all()

    if opts.rank != 0:
        return float('nan')

    n = real_features.shape[1] # feature dimensionality, used as the kernel normalizer
    m = min(min(real_features.shape[0], real_features_2.shape[0]), max_subset_size)
    t = 0
    for _subset_idx in range(num_subsets):
        x = real_features_2[np.random.choice(real_features_2.shape[0], m, replace=False)]
        y = real_features[np.random.choice(real_features.shape[0], m, replace=False)]
        a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3
        b = (x @ y.T / n + 1) ** 3
        # Unbiased MMD^2 estimate: exclude the diagonal of the within-set kernels.
        t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m
    kid = t / num_subsets / m
    return float(kid)
#----------------------------------------------------------------------------
| 3,977 | 50 | 118 | py |
DFMGAN | DFMGAN-main/metrics/frechet_inception_distance.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Frechet Inception Distance (FID) from the paper
"GANs trained by a two time-scale update rule converge to a local Nash
equilibrium". Matches the original implementation by Heusel et al. at
https://github.com/bioinf-jku/TTUR/blob/master/fid.py"""
import numpy as np
import scipy.linalg
from . import metric_utils
#----------------------------------------------------------------------------
def compute_fid(opts, max_real, num_gen):
    """Frechet Inception Distance between the real dataset and generator samples.

    Args:
        opts:     metric_utils.MetricOptions bundle.
        max_real: Maximum number of real images to use (None = all).
        num_gen:  Number of generated images to sample.

    Returns:
        FID as a float on rank 0; NaN on all other ranks.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.

    mu_real, sigma_real = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov()

    mu_gen, sigma_gen = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_mean_cov=True, max_items=num_gen).get_mean_cov()

    if opts.rank != 0:
        return float('nan')
    return _fid_from_feature_stats(mu_real, sigma_real, mu_gen, sigma_gen)

def _fid_from_feature_stats(mu_real, sigma_real, mu_gen, sigma_gen):
    """Closed-form Frechet distance between two Gaussians (Heusel et al., 2017)."""
    m = np.square(mu_gen - mu_real).sum()
    s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member
    fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2))
    return float(fid)

def compute_fid_between_dir(opts, max_real, num_gen=None, num_subsets=None, max_subset_size=None):
    """FID between the two real datasets (opts.dataset_kwargs vs opts.dataset2_kwargs).

    Fixes metric_main.fid_between_dir, which calls this function although no
    implementation existed (AttributeError at runtime). `num_gen`,
    `num_subsets`, and `max_subset_size` are accepted and ignored for
    compatibility with the existing call site (which mirrors the KID variant's
    signature); FID uses neither subset sampling nor generated images.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.

    mu_real, sigma_real = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov()

    mu_real2, sigma_real2 = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real, num_data=2).get_mean_cov()

    if opts.rank != 0:
        return float('nan')
    return _fid_from_feature_stats(mu_real, sigma_real, mu_real2, sigma_real2)
#----------------------------------------------------------------------------
| 2,040 | 47.595238 | 118 | py |
DFMGAN | DFMGAN-main/metrics/lpips.py | import lpips, torch
import itertools
import numpy as np
import dnnlib
from tqdm import tqdm
import copy
def compute_clpips(opts, num_gen):
    """Clustered LPIPS diversity metric.

    Generates `num_gen` images from fixed seeds, assigns each to the
    LPIPS-nearest training image (one cluster per training image, capped at
    200 members), then averages the pairwise LPIPS distance within each
    cluster.

    Returns:
        (clpips, clpips_rz): mean score over all clusters, and mean over only
        the clusters with a nonzero score.

    NOTE(review): if every cluster ends up with <= 1 member, n stays 0 and the
    final division raises ZeroDivisionError — confirm whether callers guarantee
    enough samples.
    """
    dataset_kwargs = opts.dataset_kwargs
    device = opts.device
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(device)
    with torch.no_grad():
        loss_fn_alex = lpips.LPIPS(net='alex', verbose = opts.progress.verbose).to(device) # best forward scores
        #loss_fn_vgg = lpips.LPIPS(net='vgg') # closer to "traditional" perceptual loss, when used for optimization

        # Load the entire real dataset into one [N, 3, H, W] tensor on device.
        data_list = []
        dataset = dnnlib.util.construct_class_by_name(**dataset_kwargs)
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
        for img, _labels in torch.utils.data.DataLoader(dataset=dataset, batch_size=64, **data_loader_kwargs):
            if img.shape[1] == 1:
                img = img.repeat([1, 3, 1, 1]) # grayscale -> 3 channels
            if img.shape[1] == 4:
                img = img[:, :3, :, :] # 4-channel input: keep only the first 3 channels
            data_list.append(img.to(device))
        data_list = torch.cat(data_list, dim = 0)

        # One cluster per training image; each holds up to 200 generated images.
        cluster = [[] for _ in range(data_list.shape[0])]
        label = torch.zeros([1, G.c_dim], device=device)
        iterator = tqdm(range(num_gen), desc = 'Clustering') if opts.progress.verbose else range(num_gen)
        for seed in iterator:
            # Deterministic latent per seed; same latent also drives the defect branch.
            z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
            img = G(z, label, defect_z = z, truncation_psi = 1, noise_mode = 'const')
            # LPIPS from the generated image to every training image at once.
            score_list = loss_fn_alex(img.repeat(data_list.shape[0], 1, 1, 1), data_list)
            #score_list = np.array([loss_fn_alex(img, data).item() for data in data_list])
            closest_index = score_list.argmin().item()
            if len(cluster[closest_index]) < 200:
                cluster[closest_index].append(img)

        # Average pairwise LPIPS within each cluster (0.0 for clusters of size <= 1).
        cluster_lpips = []
        iterator = tqdm(cluster, desc = 'Computing clustered LPIPS') if opts.progress.verbose else cluster
        for c in iterator:
            # c_lpips = []
            # for img1, img2 in itertools.combinations(c, 2):
            #     d = loss_fn_alex(img1, img2)
            #     c_lpips.append(d.item())
            # if len(c_lpips) == 0:
            #     cluster_lpips.append(0.0)
            # else:
            #     cluster_lpips.append(sum(c_lpips) / len(c_lpips))
            if len(c) <= 1:
                cluster_lpips.append(0.0)
                continue
            c_lpips = 0.0
            img = torch.cat(c, dim = 0)
            ref_img = img.clone()
            # Rotate the batch k-1 times so every ordered pair is compared exactly once.
            for _ in range(img.shape[0] - 1):
                img = torch.cat([img[1:], img[0:1]], dim = 0)
                c_lpips += loss_fn_alex(img, ref_img).sum().item()
            cluster_lpips.append(c_lpips / (img.shape[0] * (img.shape[0] - 1)))

        if opts.progress.verbose:
            print('Cluster Statistics:')
            print([(len(cluster[i]), '%.4f' % cluster_lpips[i]) for i in range(len(data_list))])

        clpips = sum(cluster_lpips) / len(cluster_lpips)
        # "_rz" variant: average only over clusters with a nonzero score.
        rz_sum = 0.0
        n = 0
        for score in cluster_lpips:
            if score != 0.0:
                rz_sum += score
                n += 1
        clpips_rz = rz_sum / n
        return clpips, clpips_rz
| 3,201 | 41.131579 | 115 | py |
DFMGAN | DFMGAN-main/metrics/perceptual_path_length.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Perceptual Path Length (PPL) from the paper "A Style-Based Generator
Architecture for Generative Adversarial Networks". Matches the original
implementation by Karras et al. at
https://github.com/NVlabs/stylegan/blob/master/metrics/perceptual_path_length.py"""
import copy
import numpy as np
import torch
import dnnlib
from . import metric_utils
#----------------------------------------------------------------------------
# Spherical interpolation of a batch of vectors.
def slerp(a, b, t):
    """Spherical linear interpolation between batches of vectors `a` and `b`.

    Both inputs are normalized to the unit sphere first; `t` is the
    interpolation fraction (broadcast against the batch). The result is
    re-normalized to unit length.
    """
    a_unit = a / a.norm(dim=-1, keepdim=True)
    b_unit = b / b.norm(dim=-1, keepdim=True)
    cos_ab = (a_unit * b_unit).sum(dim=-1, keepdim=True)
    angle = t * torch.acos(cos_ab)
    # Component of b orthogonal to a, normalized, spans the interpolation plane.
    ortho = b_unit - cos_ab * a_unit
    ortho = ortho / ortho.norm(dim=-1, keepdim=True)
    out = a_unit * torch.cos(angle) + ortho * torch.sin(angle)
    return out / out.norm(dim=-1, keepdim=True)
#----------------------------------------------------------------------------
class PPLSampler(torch.nn.Module):
    """Draws one batch of PPL distances.

    For each sample: pick two random latents, interpolate at t and t + epsilon
    (slerp in Z, lerp in W), render both images, and return the squared LPIPS
    distance divided by epsilon^2.
    """
    def __init__(self, G, G_kwargs, epsilon, space, sampling, crop, vgg16):
        assert space in ['z', 'w']
        assert sampling in ['full', 'end']
        super().__init__()
        self.G = copy.deepcopy(G)
        self.G_kwargs = G_kwargs
        self.epsilon = epsilon      # step size between the two interpolation endpoints
        self.space = space          # interpolation space: 'z' or 'w'
        self.sampling = sampling    # 'full': random t in [0,1); 'end': t fixed at 0
        self.crop = crop            # whether to crop a fixed sub-region before LPIPS
        self.vgg16 = copy.deepcopy(vgg16)

    def forward(self, c):
        """Return per-sample PPL distances for a batch of conditioning labels `c`."""
        # Generate random latents and interpolation t-values.
        t = torch.rand([c.shape[0]], device=c.device) * (1 if self.sampling == 'full' else 0)
        z0, z1 = torch.randn([c.shape[0] * 2, self.G.z_dim], device=c.device).chunk(2)

        # Interpolate in W or Z.
        if self.space == 'w':
            w0, w1 = self.G.mapping(z=torch.cat([z0,z1]), c=torch.cat([c,c])).chunk(2)
            wt0 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2))
            wt1 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2) + self.epsilon)
        else: # space == 'z'
            zt0 = slerp(z0, z1, t.unsqueeze(1))
            zt1 = slerp(z0, z1, t.unsqueeze(1) + self.epsilon)
            wt0, wt1 = self.G.mapping(z=torch.cat([zt0,zt1]), c=torch.cat([c,c])).chunk(2)

        # Randomize noise buffers.
        for name, buf in self.G.named_buffers():
            if name.endswith('.noise_const'):
                buf.copy_(torch.randn_like(buf))

        # Generate images (both endpoints in a single batch).
        img = self.G.synthesis(ws=torch.cat([wt0,wt1]), noise_mode='const', force_fp32=True, **self.G_kwargs)

        # Center crop (fixed sub-region heuristic).
        if self.crop:
            assert img.shape[2] == img.shape[3]
            c = img.shape[2] // 8
            img = img[:, :, c*3 : c*7, c*2 : c*6]

        # Downsample to 256x256.
        factor = self.G.img_resolution // 256
        if factor > 1:
            img = img.reshape([-1, img.shape[1], img.shape[2] // factor, factor, img.shape[3] // factor, factor]).mean([3, 5])

        # Scale dynamic range from [-1,1] to [0,255].
        img = (img + 1) * (255 / 2)
        if self.G.img_channels == 1:
            img = img.repeat([1, 3, 1, 1])

        # Evaluate differential LPIPS between the t and t+epsilon renderings.
        lpips_t0, lpips_t1 = self.vgg16(img, resize_images=False, return_lpips=True).chunk(2)
        dist = (lpips_t0 - lpips_t1).square().sum(1) / self.epsilon ** 2
        return dist
#----------------------------------------------------------------------------
def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size, jit=False):
    """Perceptual Path Length over `num_samples` latent perturbations.

    Samples PPL distances with PPLSampler, gathers them across GPUs, discards
    the bottom/top 1% outliers, and returns the mean (rank 0 only; NaN elsewhere).

    NOTE(review): np.percentile's `interpolation=` keyword is deprecated in
    favor of `method=` in newer NumPy (removed in 2.0) — confirm the pinned
    NumPy version before upgrading.
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    vgg16_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)

    # Setup sampler.
    sampler = PPLSampler(G=opts.G, G_kwargs=opts.G_kwargs, epsilon=epsilon, space=space, sampling=sampling, crop=crop, vgg16=vgg16)
    sampler.eval().requires_grad_(False).to(opts.device)
    if jit:
        c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device)
        sampler = torch.jit.trace(sampler, [c], check_trace=False)

    # Sampling loop: each rank samples a batch, then all ranks' results are broadcast.
    dist = []
    progress = opts.progress.sub(tag='ppl sampling', num_items=num_samples)
    for batch_start in range(0, num_samples, batch_size * opts.num_gpus):
        progress.update(batch_start)
        c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_size)]
        c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device)
        x = sampler(c)
        for src in range(opts.num_gpus):
            y = x.clone()
            if opts.num_gpus > 1:
                torch.distributed.broadcast(y, src=src)
            dist.append(y)
    progress.update(num_samples)

    # Compute PPL: trim 1%/99% percentile outliers, then average.
    if opts.rank != 0:
        return float('nan')
    dist = torch.cat(dist)[:num_samples].cpu().numpy()
    lo = np.percentile(dist, 1, interpolation='lower')
    hi = np.percentile(dist, 99, interpolation='higher')
    ppl = np.extract(np.logical_and(dist >= lo, dist <= hi), dist).mean()
    return float(ppl)
#----------------------------------------------------------------------------
| 5,538 | 40.962121 | 131 | py |
DFMGAN | DFMGAN-main/metrics/inception_score.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Inception Score (IS) from the paper "Improved techniques for training
GANs". Matches the original implementation by Salimans et al. at
https://github.com/openai/improved-gan/blob/master/inception_score/model.py"""
import numpy as np
from . import metric_utils
#----------------------------------------------------------------------------
def compute_is(opts, num_gen, num_splits):
    """Inception Score of `num_gen` generated images.

    The class probabilities are split into `num_splits` chunks; for each chunk
    the score is exp(mean KL(p(y|x) || p(y))). Returns (mean, std) across the
    splits on rank 0, or (NaN, NaN) on all other ranks.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer.

    gen_probs = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        capture_all=True, max_items=num_gen).get_all()

    if opts.rank != 0:
        return float('nan'), float('nan')

    scores = []
    for split_idx in range(num_splits):
        lo = split_idx * num_gen // num_splits
        hi = (split_idx + 1) * num_gen // num_splits
        part = gen_probs[lo:hi]
        # Per-image KL divergence against the split's marginal class distribution.
        marginal = np.mean(part, axis=0, keepdims=True)
        kl_per_image = np.sum(part * (np.log(part) - np.log(marginal)), axis=1)
        scores.append(np.exp(np.mean(kl_per_image)))
    return float(np.mean(scores)), float(np.std(scores))
#----------------------------------------------------------------------------
| 1,874 | 47.076923 | 126 | py |
DFMGAN | DFMGAN-main/metrics/metric_main.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import lpips
#----------------------------------------------------------------------------
# Registry of metric functions, keyed by function name.
_metric_dict = {} # name => fn

def register_metric(fn):
    """Register `fn` as a metric under its function name; usable as a decorator."""
    assert callable(fn)
    _metric_dict[fn.__name__] = fn
    return fn

def is_valid_metric(metric):
    """True iff `metric` names a registered metric function."""
    return metric in _metric_dict

def list_valid_metrics():
    """Names of all registered metrics, in registration order."""
    return [name for name in _metric_dict]
#----------------------------------------------------------------------------
def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full list of arguments.
    """Compute a registered metric and return its results plus metadata.

    All ranks must call this together: values produced on rank 0 are broadcast
    so every process returns identical numbers.
    """
    assert is_valid_metric(metric)
    opts = metric_utils.MetricOptions(**kwargs)

    # Calculate.
    start_time = time.time()
    results = _metric_dict[metric](opts)
    total_time = time.time() - start_time

    # Broadcast results from rank 0 to all other ranks.
    for key, value in list(results.items()):
        if opts.num_gpus > 1:
            value = torch.as_tensor(value, dtype=torch.float64, device=opts.device)
            torch.distributed.broadcast(tensor=value, src=0)
            value = float(value.cpu())
        results[key] = value

    # Decorate with metadata.
    return dnnlib.EasyDict(
        results         = dnnlib.EasyDict(results),
        metric          = metric,
        total_time      = total_time,
        total_time_str  = dnnlib.util.format_time(total_time),
        num_gpus        = opts.num_gpus,
    )
#----------------------------------------------------------------------------
def report_metric(result_dict, run_dir=None, snapshot_pkl=None):
    """Print a metric result as one JSON line and append it to metric-<name>.jsonl.

    `snapshot_pkl` is rewritten relative to `run_dir` when both are given; the
    jsonl file is only written when `run_dir` is an existing directory.
    """
    metric = result_dict['metric']
    assert is_valid_metric(metric)
    if run_dir is not None and snapshot_pkl is not None:
        snapshot_pkl = os.path.relpath(snapshot_pkl, run_dir)

    record = dict(result_dict, snapshot_pkl=snapshot_pkl, timestamp=time.time())
    jsonl_line = json.dumps(record)
    print(jsonl_line)

    if run_dir is not None and os.path.isdir(run_dir):
        out_path = os.path.join(run_dir, f'metric-{metric}.jsonl')
        with open(out_path, 'at') as f:
            f.write(jsonl_line + '\n')
#----------------------------------------------------------------------------
# Primary metrics.
@register_metric
def fid50k_full(opts):
    """FID: full real dataset vs 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
    return dict(fid50k_full=fid)

@register_metric
def kid50k_full(opts):
    """KID: up to 1M real images vs 50k generated, 100 subsets of <=1000."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k_full=kid)

@register_metric
def fid5k_full(opts):
    """FID: full real dataset vs 5k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=5000)
    return dict(fid5k_full=fid)

@register_metric
def kid5k_full(opts):
    """KID: up to 1M real images vs 5k generated, 100 subsets of <=1000."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=5000, num_subsets=100, max_subset_size=1000)
    return dict(kid5k_full=kid)

@register_metric
def fid_between_dir(opts):
    """FID between the two real datasets (dataset_kwargs vs dataset2_kwargs).

    NOTE(review): as reviewed, frechet_inception_distance defines only
    compute_fid — confirm compute_fid_between_dir exists before using this
    metric, otherwise this call raises AttributeError.
    """
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid_between_dir(opts, max_real=1000000, num_gen=5000, num_subsets=100, max_subset_size=1000)
    return dict(fid_between_dir=fid)

@register_metric
def kid_between_dir(opts):
    """KID between the two real datasets (dataset_kwargs vs dataset2_kwargs)."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid_between_dir(opts, max_real=1000000, num_gen=5000, num_subsets=100, max_subset_size=1000)
    return dict(kid_between_dir=kid)

@register_metric
def clpips1k(opts):
    """Clustered LPIPS diversity over 1000 generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    clpips1k, clpips1k_rz = lpips.compute_clpips(opts, num_gen = 1000)
    return dict(clpips1k = clpips1k, clpips1k_rz = clpips1k_rz)

@register_metric
def pr50k3_full(opts):
    """Precision/recall: up to 200k real vs 50k generated, 3-NN neighborhoods."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall)

@register_metric
def ppl2_wend(opts):
    """PPL: 50k samples, W space, endpoint sampling, no crop, batch size 2."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, batch_size=2)
    return dict(ppl2_wend=ppl)

@register_metric
def is50k(opts):
    """Inception Score over 50k generated images, 10 splits."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
    return dict(is50k_mean=mean, is50k_std=std)

@register_metric
def is5k(opts):
    """Inception Score over 5k generated images, 10 splits."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    mean, std = inception_score.compute_is(opts, num_gen=5000, num_splits=10)
    return dict(is5k_mean=mean, is5k_std=std)
#----------------------------------------------------------------------------
# Legacy metrics.
# Unlike the *_full variants above, these cap the real set at 50k and do not
# force xflip=False.

@register_metric
def fid50k(opts):
    """Legacy FID: at most 50k real images vs. 50k generated images."""
    opts.dataset_kwargs.update(max_size=None)
    fid = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000)
    return dict(fid50k=fid)

@register_metric
def kid50k(opts):
    """Legacy KID: at most 50k real images vs. 50k generated images."""
    opts.dataset_kwargs.update(max_size=None)
    kid = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k=kid)

@register_metric
def pr50k3(opts):
    """Legacy precision/recall (neighborhood size 3), 50k vs. 50k."""
    opts.dataset_kwargs.update(max_size=None)
    precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_precision=precision, pr50k3_recall=recall)

@register_metric
def ppl_zfull(opts):
    """PPL in Z space, full-path sampling, with center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='z', sampling='full', crop=True, batch_size=2)
    return dict(ppl_zfull=ppl)

@register_metric
def ppl_wfull(opts):
    """PPL in W space, full-path sampling, with center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='full', crop=True, batch_size=2)
    return dict(ppl_wfull=ppl)

@register_metric
def ppl_zend(opts):
    """PPL in Z space, endpoint sampling, with center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='z', sampling='end', crop=True, batch_size=2)
    return dict(ppl_zend=ppl)

@register_metric
def ppl_wend(opts):
    """PPL in W space, endpoint sampling, with center crop."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=True, batch_size=2)
    return dict(ppl_wend=ppl)
#----------------------------------------------------------------------------
| 7,212 | 36.963158 | 147 | py |
DFMGAN | DFMGAN-main/metrics/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# empty
| 435 | 42.6 | 76 | py |
DFMGAN | DFMGAN-main/metrics/precision_recall.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Precision/Recall (PR) from the paper "Improved Precision and Recall
Metric for Assessing Generative Models". Matches the original implementation
by Kynkaanniemi et al. at
https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py"""
import torch
from . import metric_utils
#----------------------------------------------------------------------------
def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size):
    """Pairwise distances between row_features and col_features, with the
    column batches split across GPUs and the full matrix gathered on rank 0.

    Returns the [num_rows, num_cols] distance matrix on rank 0, None elsewhere.
    """
    assert 0 <= rank < num_gpus
    num_cols = col_features.shape[0]
    # Pad the columns so they split into num_batches equal chunks, where
    # num_batches is a multiple of num_gpus.
    num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus
    col_batches = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches]).chunk(num_batches)
    dist_batches = []
    for col_batch in col_batches[rank :: num_gpus]:
        dist_batch = torch.cdist(row_features.unsqueeze(0), col_batch.unsqueeze(0))[0]
        for src in range(num_gpus):
            # Each rank broadcasts its current batch in turn so rank 0 can
            # collect every rank's slice; other ranks only participate in the
            # broadcast and keep None placeholders.
            dist_broadcast = dist_batch.clone()
            if num_gpus > 1:
                torch.distributed.broadcast(dist_broadcast, src=src)
            dist_batches.append(dist_broadcast.cpu() if rank == 0 else None)
    # Trim the column padding before returning.
    return torch.cat(dist_batches, dim=1)[:, :num_cols] if rank == 0 else None
#----------------------------------------------------------------------------
def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size):
    """Improved precision & recall (Kynkaanniemi et al. 2019).

    Precision is the fraction of generated samples inside the real-feature
    manifold; recall is the fraction of real samples inside the generated
    manifold. A point is "inside" a manifold when its distance to some member
    is at most that member's distance to its own nhood_size-th neighbor.

    Returns:
        (precision, recall) as floats on rank 0; float('nan') on other ranks.
    """
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    detector_kwargs = dict(return_features=True)
    # VGG16 features for real and generated images, in float16 to save memory.
    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device)
    gen_features = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device)
    results = dict()
    for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]:
        # Per-member manifold radius: distance to the (nhood_size+1)-th value
        # (the +1 skips the zero self-distance).
        kth = []
        for manifold_batch in manifold.split(row_batch_size):
            dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None)
        kth = torch.cat(kth) if opts.rank == 0 else None
        # A probe is counted when it lies within any member's radius.
        pred = []
        for probes_batch in probes.split(row_batch_size):
            dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None)
        results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan')
    return results['precision'], results['recall']
#----------------------------------------------------------------------------
| 3,617 | 56.428571 | 159 | py |
DFMGAN | DFMGAN-main/dnnlib/util.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Miscellaneous utility classes and functions."""
import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
# Util classes
# ------------------------------------------------------------------------------------------
class EasyDict(dict):
    """Dictionary subclass whose entries are also reachable as attributes
    (``d.key`` is equivalent to ``d["key"]``)."""

    def __getattr__(self, name: str) -> Any:
        # Attribute reads fall through to item lookup. Missing keys must
        # surface as AttributeError so hasattr()/getattr() keep their
        # usual semantics.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]
class Logger(object):
    """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""

    def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
        # Optional file mirror of everything written to stdout/stderr.
        self.file = None
        if file_name is not None:
            self.file = open(file_name, file_mode)
        self.should_flush = should_flush
        # Keep the real streams so they can be restored in close().
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        # From here on, all process output is routed through this object.
        sys.stdout = self
        sys.stderr = self

    def __enter__(self) -> "Logger":
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.close()

    def write(self, text: Union[str, bytes]) -> None:
        """Write text to stdout (and a file) and optionally flush."""
        if isinstance(text, bytes):
            text = text.decode()
        if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
            return
        if self.file is not None:
            self.file.write(text)
        self.stdout.write(text)
        if self.should_flush:
            self.flush()

    def flush(self) -> None:
        """Flush written text to both stdout and a file, if open."""
        if self.file is not None:
            self.file.flush()
        self.stdout.flush()

    def close(self) -> None:
        """Flush, close possible files, and remove stdout/stderr mirroring."""
        self.flush()
        # if using multiple loggers, prevent closing in wrong order
        if sys.stdout is self:
            sys.stdout = self.stdout
        if sys.stderr is self:
            sys.stderr = self.stderr
        if self.file is not None:
            self.file.close()
            self.file = None
# Cache directories
# ------------------------------------------------------------------------------------------
# Explicit override set via set_cache_dir(); None means "auto-detect".
_dnnlib_cache_dir = None

def set_cache_dir(path: str) -> None:
    """Override the base directory used by make_cache_dir_path()."""
    global _dnnlib_cache_dir
    _dnnlib_cache_dir = path

def make_cache_dir_path(*paths: str) -> str:
    """Build a path inside the dnnlib cache directory.

    Resolution order: explicit set_cache_dir() override, then the
    DNNLIB_CACHE_DIR environment variable, then $HOME/.cache/dnnlib,
    then %USERPROFILE%/.cache/dnnlib, and finally the system temp dir.
    """
    if _dnnlib_cache_dir is not None:
        return os.path.join(_dnnlib_cache_dir, *paths)
    env = os.environ
    if 'DNNLIB_CACHE_DIR' in env:
        base = (env['DNNLIB_CACHE_DIR'],)
    elif 'HOME' in env:
        base = (env['HOME'], '.cache', 'dnnlib')
    elif 'USERPROFILE' in env:
        base = (env['USERPROFILE'], '.cache', 'dnnlib')
    else:
        base = (tempfile.gettempdir(), '.cache', 'dnnlib')
    return os.path.join(*base, *paths)
# Small util functions
# ------------------------------------------------------------------------------------------
def format_time(seconds: Union[int, float]) -> str:
    """Render a duration in seconds as '<s>s', '<m>m <s>s', '<h>h <m>m <s>s',
    or '<d>d <h>h <m>m' depending on its magnitude."""
    s = int(np.rint(seconds))
    minute, hour, day = 60, 60 * 60, 24 * 60 * 60
    if s < minute:
        return f"{s}s"
    if s < hour:
        return f"{s // minute}m {s % minute:02}s"
    if s < day:
        return f"{s // hour}h {(s // minute) % 60:02}m {s % minute:02}s"
    # Seconds are dropped once the duration reaches a full day.
    return f"{s // day}d {(s // hour) % 24:02}h {(s // minute) % 60:02}m"
def ask_yes_no(question: str) -> bool:
    """Ask the user the question until the user inputs a valid answer."""
    # NOTE(review): strtobool comes from distutils, which is deprecated and
    # removed in Python 3.12 -- consider replacing if targeting newer Pythons.
    # strtobool also returns 0/1 (int), not bool, despite the annotation.
    while True:
        try:
            print("{0} [y/n]".format(question))
            return strtobool(input().lower())
        except ValueError:
            # Unrecognized answer -- prompt again.
            pass
def tuple_product(t: Tuple) -> Any:
    """Calculate the product of the tuple elements (1 for an empty tuple)."""
    product = 1
    for element in t:
        product = product * element
    return product
# Mapping from numpy-style dtype names to the ctypes type of the same bit
# width; consumed by get_dtype_and_ctype() to pair up the two representations.
_str_to_ctype = {
    "uint8": ctypes.c_ubyte,
    "uint16": ctypes.c_uint16,
    "uint32": ctypes.c_uint32,
    "uint64": ctypes.c_uint64,
    "int8": ctypes.c_byte,
    "int16": ctypes.c_int16,
    "int32": ctypes.c_int32,
    "int64": ctypes.c_int64,
    "float32": ctypes.c_float,
    "float64": ctypes.c_double
}
def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
    """Resolve a type name (a string, or an object exposing __name__ or name)
    to a matching (numpy dtype, ctypes type) pair of identical byte size."""
    if isinstance(type_obj, str):
        type_str = type_obj
    elif hasattr(type_obj, "__name__"):
        type_str = type_obj.__name__
    elif hasattr(type_obj, "name"):
        type_str = type_obj.name
    else:
        raise RuntimeError("Cannot infer type name from input")
    assert type_str in _str_to_ctype.keys()
    dtype = np.dtype(type_str)
    ctype = _str_to_ctype[type_str]
    # Sanity check: both representations must occupy the same number of bytes.
    assert dtype.itemsize == ctypes.sizeof(ctype)
    return dtype, ctype
def is_pickleable(obj: Any) -> bool:
    """Return True if `obj` can be serialized with pickle, False otherwise.

    Any failure during pickling (TypeError, pickle.PicklingError, recursion
    errors, ...) is treated as "not pickleable".
    """
    try:
        with io.BytesIO() as stream:
            pickle.dump(obj, stream)
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer silently swallowed.
        return False
# Functionality to import modules/objects by name, and call functions by name
# ------------------------------------------------------------------------------------------
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
    """Searches for the underlying module behind the name to some python object.
    Returns the module and the object name (original name with module part removed)."""
    # allow convenience shorthands, substitute them by full names
    obj_name = re.sub("^np.", "numpy.", obj_name)
    obj_name = re.sub("^tf.", "tensorflow.", obj_name)
    # list alternatives for (module_name, local_obj_name), longest module
    # prefix first (e.g. "a.b.c" -> ("a.b.c", ""), ("a.b", "c"), ("a", "b.c"))
    parts = obj_name.split(".")
    name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
    # try each alternative in turn
    for module_name, local_obj_name in name_pairs:
        try:
            module = importlib.import_module(module_name) # may raise ImportError
            get_obj_from_module(module, local_obj_name) # may raise AttributeError
            return module, local_obj_name
        except:
            # Deliberately broad: any failure just means "try the next split".
            pass
    # maybe some of the modules themselves contain errors?
    for module_name, _local_obj_name in name_pairs:
        try:
            importlib.import_module(module_name) # may raise ImportError
        except ImportError:
            # Re-raise only if the module exists but failed to import for
            # some other reason (syntax error, missing dependency, ...).
            if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
                raise
    # maybe the requested attribute is missing?
    for module_name, local_obj_name in name_pairs:
        try:
            module = importlib.import_module(module_name) # may raise ImportError
            get_obj_from_module(module, local_obj_name) # may raise AttributeError
        except ImportError:
            pass
    # we are out of luck, but we have no idea why
    raise ImportError(obj_name)
def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
    """Walk a dotted attribute path starting at `module` and return the final
    object; an empty path returns the module itself."""
    target = module
    if obj_name != '':
        for attr in obj_name.split("."):
            target = getattr(target, attr)
    return target
def get_obj_by_name(name: str) -> Any:
    """Finds the python object with the given name."""
    module, obj_name = get_module_from_obj_name(name)
    return get_obj_from_module(module, obj_name)

def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
    """Finds the python object with the given name and calls it as a function."""
    # func_name is keyword-only so positional args pass straight through.
    assert func_name is not None
    func_obj = get_obj_by_name(func_name)
    assert callable(func_obj)
    return func_obj(*args, **kwargs)

def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
    """Finds the python class with the given name and constructs it with the given arguments."""
    return call_func_by_name(*args, func_name=class_name, **kwargs)
def get_module_dir_by_obj_name(obj_name: str) -> str:
    """Get the directory path of the module containing the given object name."""
    module, _ = get_module_from_obj_name(obj_name)
    return os.path.dirname(inspect.getfile(module))
def is_top_level_function(obj: Any) -> bool:
    """Determine whether the given object is a top-level function, i.e.,
    defined at module scope using 'def'."""
    if not callable(obj):
        return False
    # A top-level function's name appears in its own module's namespace.
    return obj.__name__ in sys.modules[obj.__module__].__dict__
def get_top_level_function_name(obj: Any) -> str:
    """Return the fully-qualified name of a top-level function."""
    assert is_top_level_function(obj)
    module = obj.__module__
    # Functions defined in the entry script report '__main__'; substitute the
    # script's file name so the result is importable later.
    if module == '__main__':
        module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
    return module + "." + obj.__name__
# File system helpers
# ------------------------------------------------------------------------------------------
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
    """List all files recursively in a given directory while ignoring given file and directory names.
    Returns list of tuples containing both absolute and relative paths.

    `ignores` entries are fnmatch-style patterns matched against individual
    file and directory names (not full paths).
    """
    assert os.path.isdir(dir_path)
    base_name = os.path.basename(os.path.normpath(dir_path))
    if ignores is None:
        ignores = []
    result = []
    for root, dirs, files in os.walk(dir_path, topdown=True):
        for ignore_ in ignores:
            dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
            # dirs need to be edited in-place: with topdown=True, os.walk
            # skips recursion into directories removed from this list.
            for d in dirs_to_remove:
                dirs.remove(d)
            # Rebinding `files` each pass accumulates all ignore filters.
            files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
        absolute_paths = [os.path.join(root, f) for f in files]
        relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
        if add_base_to_relative:
            relative_paths = [os.path.join(base_name, p) for p in relative_paths]
        assert len(absolute_paths) == len(relative_paths)
        result += zip(absolute_paths, relative_paths)
    return result
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
    """Copy each (src, dst) pair in `files`, creating destination directories
    (including intermediates) as needed."""
    for src_path, dst_path in files:
        dst_dir = os.path.dirname(dst_path)
        # makedirs creates all intermediate-level directories in one call.
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        shutil.copyfile(src_path, dst_path)
# URL helpers
# ------------------------------------------------------------------------------------------
def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
    """Determine whether the given object is a valid URL string.

    A valid URL must contain '://', have a scheme, and have a netloc
    containing a '.' -- both for the URL itself and for its site root.
    'file://' URLs are accepted only when allow_file_urls is True.
    """
    if not isinstance(obj, str) or not "://" in obj:
        return False
    if allow_file_urls and obj.startswith('file://'):
        return True
    # Use the stdlib parser directly. requests.compat.urlparse/urljoin are
    # aliases of urllib.parse.urlparse/urljoin, so behavior is unchanged and
    # this function no longer needs the third-party `requests` package.
    import urllib.parse
    try:
        res = urllib.parse.urlparse(obj)
        if not res.scheme or not res.netloc or not "." in res.netloc:
            return False
        res = urllib.parse.urlparse(urllib.parse.urljoin(obj, "/"))
        if not res.scheme or not res.netloc or not "." in res.netloc:
            return False
    except Exception:
        # Narrowed from a bare `except:`; any parse failure means "not a URL".
        return False
    return True
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
    """Download the given URL and return a binary-mode file object to access the data.

    Args:
        url:             HTTP(S)/file URL, or a plain local path.
        cache_dir:       Directory for the download cache (default: dnnlib cache dir).
        num_attempts:    Number of download retries before giving up.
        verbose:         Print progress while downloading.
        return_filename: Return the cached file's path instead of a file
                         object (requires cache=True).
        cache:           Reuse/store the download in the cache directory.
    """
    assert num_attempts >= 1
    assert not (return_filename and (not cache))
    # Doesn't look like an URL scheme so interpret it as a local filename.
    if not re.match('^[a-z]+://', url):
        return url if return_filename else open(url, "rb")
    # Handle file URLs. This code handles unusual file:// patterns that
    # arise on Windows:
    #
    # file:///c:/foo.txt
    #
    # which would translate to a local '/c:/foo.txt' filename that's
    # invalid. Drop the forward slash for such pathnames.
    #
    # If you touch this code path, you should test it on both Linux and
    # Windows.
    #
    # Some internet resources suggest using urllib.request.url2pathname() but
    # but that converts forward slashes to backslashes and this causes
    # its own set of problems.
    if url.startswith('file://'):
        filename = urllib.parse.urlparse(url).path
        if re.match(r'^/[a-zA-Z]:', filename):
            filename = filename[1:]
        return filename if return_filename else open(filename, "rb")
    assert is_url(url)
    # Lookup from cache: files are stored as <md5(url)>_<sanitized name>.
    if cache_dir is None:
        cache_dir = make_cache_dir_path('downloads')
    url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
    if cache:
        cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
        if len(cache_files) == 1:
            filename = cache_files[0]
            return filename if return_filename else open(filename, "rb")
    # Download.
    url_name = None
    url_data = None
    with requests.Session() as session:
        if verbose:
            print("Downloading %s ..." % url, end="", flush=True)
        for attempts_left in reversed(range(num_attempts)):
            try:
                with session.get(url) as res:
                    res.raise_for_status()
                    if len(res.content) == 0:
                        raise IOError("No data received")
                    # Tiny responses may be Google Drive interstitial pages
                    # rather than the actual payload -- detect and handle.
                    if len(res.content) < 8192:
                        content_str = res.content.decode("utf-8")
                        if "download_warning" in res.headers.get("Set-Cookie", ""):
                            links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
                            if len(links) == 1:
                                # Retry with the confirmed download link.
                                url = requests.compat.urljoin(url, links[0])
                                raise IOError("Google Drive virus checker nag")
                        if "Google Drive - Quota exceeded" in content_str:
                            raise IOError("Google Drive download quota exceeded -- please try again later")
                    match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
                    url_name = match[1] if match else url
                    url_data = res.content
                    if verbose:
                        print(" done")
                    break
            except KeyboardInterrupt:
                raise
            except:
                # Any other failure consumes one attempt; re-raise when the
                # budget is exhausted.
                if not attempts_left:
                    if verbose:
                        print(" failed")
                    raise
                if verbose:
                    print(".", end="", flush=True)
    # Save to cache.
    if cache:
        safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
        cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
        temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
        os.makedirs(cache_dir, exist_ok=True)
        # Write to a unique temp file first, then rename into place.
        with open(temp_file, "wb") as f:
            f.write(url_data)
        os.replace(temp_file, cache_file) # atomic
        if return_filename:
            return cache_file
    # Return data as file object.
    assert not return_filename
    return io.BytesIO(url_data)
| 16,625 | 33.782427 | 151 | py |
DFMGAN | DFMGAN-main/dnnlib/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .util import EasyDict, make_cache_dir_path
| 476 | 46.7 | 76 | py |
Conditionial-SWF | Conditionial-SWF-main/main.py | import glob
import os
import shutil
import configargparse
import jax
import jax.numpy as jnp
import numpy as np
import dataset
import models
import plotting
import utils
# Parse options: a config file is mandatory; utils.setup_parser registers
# every key of utils.param_dict as a --<key> override.
parser = configargparse.ArgumentParser()
parser.add("-c", "--config", required=True, is_config_file=True, help="config file path")
utils.setup_parser(parser)
args = parser.parse_args()
# print configs and copy code for reproducibility
logger, dirname = utils.setup_logging(args)
# Snapshot every .py file next to this script into <run dir>/code/*.bak.
files_to_copy = glob.glob(os.path.dirname(os.path.realpath(__file__)) + "/*.py")
for script_src in files_to_copy:
    script_dst = os.path.abspath(os.path.join(dirname, "code", os.path.basename(script_src) + ".bak"))
    shutil.copyfile(script_src, script_dst)
for k, v in sorted(vars(args).items()):
    logger.info(" %30s: %s" % (k, v))
# experimental setups
n_devices = jax.device_count()
devices = jax.devices()
logger.info(f"{n_devices} devices found.")
utils.setup_seed(args.seed)
data_train, data_test, label_train, label_test, data_shape = dataset.get_dataset(args.dataset)
# Data arrays are laid out as (dim, n_samples).
dim, n_train_data = data_train.shape
_, n_test_data = data_test.shape
n_labels = label_train.shape[0]
cdim = dim  # particle dimension; grows to dim + n_labels for class-conditional runs
hdim = args.hdim
hdim_per_conv = args.hdim_per_conv
layer_steps = args.layer_steps
step_size = args.step_size
n_batched_particles = args.n_batched_particles
n_offline_particles = args.n_offline_particles
n_bins_particles = args.n_bins_particles
n_bins_data = args.n_bins_data
init_std = args.init_std
max_layer = 100000  # hard upper bound on the number of transform iterations
logger.info(f"dim={dim}, #data={n_train_data}, #test data={n_test_data}")
logger.info(f"hdim={hdim}, hdim_per_conv={hdim_per_conv}, #batched particles={n_batched_particles}, #offline particles={n_offline_particles}, #layer_steps={layer_steps}, stepsize={step_size}")
logger.info(f"forward: {args.forward}, inverse: {args.inverse}")
if args.forward == "rqspline":
    logger.info(f"#bins for particles={n_bins_particles}")
if args.inverse == "rqspline":
    logger.info(f"#bins for data={n_bins_data}")
# Sanity checks on the configuration.
assert dim == np.prod(data_shape)
nrow = int(np.sqrt(args.n_viz))
assert nrow * nrow == args.n_viz # use a square number for easy visualization
assert n_offline_particles // n_devices >= args.n_viz # make sure there are enough particles on device 0 for visualization
assert args.forward in ["rqspline", "sorting"]
assert args.inverse in ["rqspline", "sorting"]
assert n_batched_particles % n_devices == n_offline_particles % n_devices == 0
assert args.downsample.lower() in ["nearest", "lanczos3", "lanczos5"]
# for class-conditional generation, data/particles are in the XxY space
if args.cond and args.cond_type == "class":
    amplifier = args.amplifier
    # Append the scaled label rows below the image rows.
    data_train = np.concatenate([data_train, label_train * amplifier], axis=0)
    cdim = dim + n_labels
# make sure the dataset can be evenly split across devices
if n_train_data % n_devices != 0:
    # Pad by repeating the first few samples.
    data_train = np.concatenate([data_train, data_train[:, :n_devices - n_train_data % n_devices]], axis=1)
    n_train_data = data_train.shape[1]
# initialize/restore particles
if args.restore_path:
    if os.path.isfile(os.path.join(args.restore_path, "particles_batched.npy")) and os.path.isfile(os.path.join(args.restore_path, "particles_offline.npy")):
        particles_batched = np.load(os.path.join(args.restore_path, "particles_batched.npy"))
        particles_offline = np.load(os.path.join(args.restore_path, "particles_offline.npy"))
    else:
        raise ValueError(f"Cannot restore from {args.restore_path}")
else:
    # Fresh Gaussian initialization.
    particles_batched = np.random.randn(cdim, n_batched_particles) * init_std
    particles_offline = np.random.randn(cdim, n_offline_particles) * init_std
# generate mask and initialize particles for conditional tasks
# mask == 1 marks the free (generated) entries; mask == 0 marks the observed
# (conditioned) entries.
if args.cond:
    if args.cond_type.lower() == "bottom":
        mask = np.ones(data_shape, dtype=np.float32)
        mask[:, :data_shape[1] // 2, :] = 0.0
    elif args.cond_type.lower() == "right":
        mask = np.ones(data_shape, dtype=np.float32)
        # NOTE(review): the width split also uses data_shape[1] (height) as
        # the bound -- correct for square images; verify for non-square.
        mask[:, :, :data_shape[1] // 2] = 0.0
    elif args.cond_type.lower() == "class":
        mask = np.ones(cdim, dtype=np.float32)
        mask[dim:] = 0.0
    else:
        raise NotImplementedError(f"Condition type {args.cond_type} unknown.")
    mask = np.reshape(mask, (-1, 1))
    if args.cond_type.lower() == "class":
        # for class-conditional generation, we use uniform distribution of class labels
        batched_idx = np.tile(np.repeat(np.arange(n_labels), nrow), n_batched_particles // (n_labels * nrow) + 1)
        offline_idx = np.tile(np.repeat(np.arange(n_labels), nrow), n_offline_particles // (n_labels * nrow) + 1)
        onehot = np.eye(n_labels) * amplifier
        particles_batched[dim:, :] = onehot[:, batched_idx[:n_batched_particles]]
        particles_offline[dim:, :] = onehot[:, offline_idx[:n_offline_particles]]
    else:
        # for image inpainting, we create partially-observed images from the dataset
        n_copies = n_batched_particles // n_train_data
        data_train_samples = data_train[:, :n_batched_particles - n_copies * n_train_data]
        data_train_samples = np.concatenate([data_train] * n_copies + [data_train_samples], axis=1)
        if args.dequantize: # TODO check if necessary
            data_train_samples = data_train_samples + np.random.rand(*data_train_samples.shape) / 128.0
        # Keep the observed half fixed to data; only the masked half is free.
        particles_batched = particles_batched * mask + data_train_samples * (1.0 - mask)
        assert n_offline_particles % nrow == 0 and n_offline_particles // nrow <= n_test_data # for easy visualization
        data_test_samples = np.repeat(data_test[:, :n_offline_particles // nrow], nrow, axis=1)
        particles_offline = particles_offline * mask + data_test_samples * (1.0 - mask)
else:
    mask = None
# plot initial particles
samples_0 = np.concatenate([np.reshape(particles_batched[:dim, :args.n_viz].T, (nrow, nrow, -1)), np.reshape(particles_offline[:dim, :args.n_viz].T, (nrow, nrow, -1))], axis=1)
plotting.save_image(args, 0, samples_0, prefix="batched_offline", nrow=nrow * 2)
plotting.save_image(args, 0, data_train[:dim, :args.n_viz].T, prefix="data", nrow=nrow)
# copy data to devices (sharded along the sample axis)
particles_batched_sh = jax.device_put_sharded(np.split(particles_batched, n_devices, axis=1), devices)
particles_offline_sh = jax.device_put_sharded(np.split(particles_offline, n_devices, axis=1), devices)
data_train_sh = jax.device_put_sharded(np.split(data_train, n_devices, axis=1), devices)
particles_batched_to_save = None
# the "model" defines locally-connected projections and pyramidal schedules
if args.dataset in ["mnist", "fashion"]:
    model = models.mnist_model
elif args.dataset in ["cifar10"]:
    model = models.cifar10_model
elif args.dataset in ["celeba"]:
    model = models.celeba_model
else:
    raise NotImplementedError(f"Model for {args.dataset} unknown.")
if args.baseline:
    # Baseline SWF overrides the dataset-specific model.
    model = models.swf_model
transform_layers, transform_steps = model(
    data_shape=data_shape, mask=mask, hdim=hdim, hdim_per_conv=hdim_per_conv, step_size=step_size, layer_steps=layer_steps, forward=args.forward, inverse=args.inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, downsample_method=args.downsample, dequantize=args.dequantize
)
# generate batched & offline samples
key = jax.random.PRNGKey(args.seed)
steps_mark = list(np.cumsum(transform_steps))  # cumulative step index at which each layer is retired
assert len(steps_mark) == len(transform_layers)
for i in range(1, max_layer + 1):
    if args.pyramidal:
        # Consume layers in order; drop a layer once its step budget is spent.
        if i > steps_mark[0]:
            steps_mark = steps_mark[1:]
            transform_layers = transform_layers[1:]
            if not transform_layers:
                break
        logger.info(f"Now use {transform_layers[0]}")
        key, wkey = jax.random.split(key)
        key, dkey = jax.random.split(key)
        dkeys = jax.random.split(dkey, n_devices)
        particles_batched_sh, particles_offline_sh, ws_dist_batched_sh, ws_dist_offline_sh, particles_batched_to_save = transform_layers[0](wkey, dkeys, data_train_sh, particles_batched_sh, particles_offline_sh)
    else:
        # Non-pyramidal: apply a uniformly random layer each iteration.
        nf = len(transform_layers)
        key, wkey = jax.random.split(key)
        key, dkey = jax.random.split(key)
        dkeys = jax.random.split(dkey, n_devices)
        particles_batched_sh, particles_offline_sh, ws_dist_batched_sh, ws_dist_offline_sh, particles_batched_to_save = transform_layers[np.random.randint(nf)](wkey, dkeys, data_train_sh, particles_batched_sh, particles_offline_sh)
    logger.info(f"Iter {i:3d}: ws_dist_batched={jnp.mean(ws_dist_batched_sh):.5f}, ws_dist_offline={jnp.mean(ws_dist_offline_sh):.5f}")
    if i % args.viz_every == 0:
        # Visualize samples from device 0 only (batched next to offline).
        samples_i = jnp.concatenate([jnp.reshape(particles_batched_to_save[0, :dim, :args.n_viz].T, (nrow, nrow, -1)), jnp.reshape(particles_offline_sh[0, :dim, :args.n_viz].T, (nrow, nrow, -1))], axis=1)
        plotting.save_image(args, i, samples_i, prefix="batched_offline", nrow=nrow * 2)
# save final particles and their nearest neighbors
# Fold the device axis back into the sample axis: (n_devices, cdim, n/dev) -> (cdim, n).
particles_batched = np.moveaxis(np.array(particles_batched_to_save), 0, 1).reshape(cdim, -1)
particles_offline = np.moveaxis(np.array(particles_offline_sh), 0, 1).reshape(cdim, -1)
with open(os.path.join(dirname, "particles", "particles_batched.npy"), "wb") as f:
    np.save(f, particles_batched)
    logger.info(f"{f.name} saved.")
with open(os.path.join(dirname, "particles", "particles_offline.npy"), "wb") as f:
    np.save(f, particles_offline)
    logger.info(f"{f.name} saved.")
plotting.make_video(args, "batched_offline_samples", max_frame=max_layer)
# Release the sharded device buffers before the neighbor search below.
del particles_batched_sh, particles_offline_sh, data_train_sh
data_train = jnp.array(data_train)
# save nearest neighbors of generated particles
all_find_neighbors = jax.vmap(utils.find_neighbors, in_axes=(1, None))
# NOTE(review): nrow=11 below suggests find_neighbors returns the query plus
# its 10 nearest training images -- confirm against utils.find_neighbors.
particles_batched_with_neighbors = jnp.reshape(all_find_neighbors(particles_batched[:dim, :args.n_viz], data_train[:dim]), (-1, dim))
particles_offline_with_neighbors = jnp.reshape(all_find_neighbors(particles_offline[:dim, :args.n_viz], data_train[:dim]), (-1, dim))
plotting.save_image(args, 0, particles_batched_with_neighbors, prefix="nn_batched", nrow=11)
plotting.save_image(args, 0, particles_offline_with_neighbors, prefix="nn_offline", nrow=11)
| 9,887 | 47.470588 | 287 | py |
Conditionial-SWF | Conditionial-SWF-main/plotting.py | import os
import imageio
import numpy as np
import torch
import torchvision
def save_image(args, i, data, prefix="", nrow=None):
    """Save a batch of flattened samples (values in [-1, 1]) as a PNG grid.

    Args:
        args:   parsed options; `dataset` picks the image shape, `dirname`
                the output directory.
        i:      iteration index, embedded in the file name.
        data:   array of shape (n_samples, dim) with values in [-1, 1].
        prefix: file-name prefix.
        nrow:   images per grid row; defaults to sqrt(n_samples).
    """
    data = (np.array(data) + 1.0) / 2.0  # [-1, 1] -> [0, 1] for torchvision
    if args.dataset in ["mnist", "fashion"]:
        data_shape = (1, 28, 28)
    if args.dataset == "cifar10":
        data_shape = (3, 32, 32)
    if args.dataset == "celeba":
        data_shape = (3, 64, 64)
    # NOTE(review): an unrecognized dataset leaves data_shape unbound and
    # raises NameError on the last line.
    nrow = int(np.sqrt(data.shape[0])) if nrow is None else nrow
    torchvision.utils.save_image(torch.from_numpy(data.reshape(-1, *data_shape)), os.path.join(args.dirname, "images", prefix + f"_samples_{i:04d}.png"), nrow=nrow)
def make_video(args, prefix="", fps=24, max_frame=100000):
    """Stitch existing frames `<prefix>_0000.png`, ... into `<prefix>.mp4`.

    Missing frame indices are simply skipped, so sparse snapshots still
    produce a contiguous video.
    """
    image_dir = os.path.join(args.dirname, "images")
    out_path = os.path.join(args.dirname, "videos", f"{prefix}.mp4")
    writer = imageio.get_writer(out_path, fps=fps)
    for frame_idx in range(max_frame + 1):
        frame_path = os.path.join(image_dir, f"{prefix}_{frame_idx:04d}.png")
        if os.path.exists(frame_path):
            writer.append_data(imageio.v2.imread(frame_path))
    writer.close()
| 965 | 33.5 | 162 | py |
Conditionial-SWF | Conditionial-SWF-main/utils.py | import errno
import logging
import os
import random
import time
import coloredlogs
import jax
import jax.numpy as jnp
import numpy as np
# Default hyper-parameters for the sliced-Wasserstein-flow experiments.
# `setup_parser` exposes every entry as a CLI flag of the same name, using
# the value's type to parse it (bools go through `str2bool`).
param_dict = dict(
    seed=0,
    # projections: total directions per layer, and directions per conv slicer
    hdim=10000,
    hdim_per_conv=10,
    # iterations per layer and their step size
    layer_steps=200,
    step_size=1.0,
    # particle counts for the batched and offline (held-out) particle sets
    n_batched_particles=250000,
    n_offline_particles=4000,
    # 1-D CDF estimators: "sorting" (empirical CDF) or "rqspline" (spline fit)
    forward="sorting",
    inverse="sorting",
    n_bins_particles=200,
    n_bins_data=200,
    # interpolation used by the downsampling slicers (see jax.image.resize)
    downsample="lanczos5",
    dequantize=True,
    pyramidal=True,
    # output/bookkeeping
    basedir="output",
    expname="experiment",
    dataset="mnist",
    # visualization: number of particles rendered and snapshot frequency
    n_viz=400,
    viz_every=100,
    restore_path=None,
    # conditional-generation options
    cond=False,
    cond_type="bottom",
    amplifier=1.0,
    init_std=0.1,
    baseline=False,
)
def str2bool(v):
    """Parse a command-line boolean flag; real booleans pass through unchanged.

    Accepts the usual yes/no spellings case-insensitively and raises
    ValueError on anything else (argparse surfaces that as a usage error).
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise ValueError("Boolean value expected.")
def add_dict_to_argparser(parser, default_dict):
    """Register one `--<key>` option per dict entry, defaulting to its value.

    None-valued entries parse as strings; booleans go through `str2bool`
    so `--flag false` works; everything else uses the value's own type.
    """
    for name, default in default_dict.items():
        if default is None:
            arg_type = str
        elif isinstance(default, bool):
            arg_type = str2bool
        else:
            arg_type = type(default)
        parser.add_argument(f"--{name}", default=default, type=arg_type)
def setup_parser(parser):
    """Populate *parser* with one flag per entry of the default `param_dict`."""
    add_dict_to_argparser(parser, param_dict)
def setup_seed(seed):
    """Seed Python hash randomization, the stdlib RNG, and NumPy's RNG."""
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
def setup_logging(args):
    """Create a timestamped run directory and a colored console/file logger.

    Builds a directory name `<basedir>/<timestamp>.<suffix>` where the
    suffix encodes the main hyper-parameters found on *args* (every field
    is optional — missing attributes are simply omitted), creates the
    `code/`, `images/`, `videos/` and `particles/` subdirectories, attaches
    a file handler writing `logfile.txt`, stores the chosen path on
    `args.dirname`, and returns `(logger, dirname)`.
    """
    dirname_base = args.basedir if hasattr(args, "basedir") else "basedir"
    logger = logging.getLogger("COLOREDLOGS")
    FORMAT = "[%(asctime)s] %(message)s"
    DATEFMT = "%H:%M:%S"
    LEVEL_STYLES = dict(
        debug=dict(color="blue"),
        info=dict(color="green"),
        verbose=dict(),
        warning=dict(color="yellow"),
        error=dict(color="red"),
        critical=dict(color="magenta"),
    )
    coloredlogs.install(logger=logger, level="info", fmt=FORMAT, datefmt=DATEFMT, level_styles=LEVEL_STYLES)
    # Determine suffix
    # Each `+= ... if hasattr(...)` line appends one hyper-parameter tag;
    # the bare `+= "-" if suffix` lines insert separators only when the
    # suffix is non-empty so names never start with a dash.
    suffix = ""
    suffix += args.dataset if hasattr(args, "dataset") else ""
    suffix += "-" if suffix else ""
    suffix += args.cond_type if hasattr(args, "cond") and hasattr(args, "cond_type") and args.cond else "uncond"
    suffix += "-" if suffix else ""
    suffix += args.forward if hasattr(args, "forward") else ""
    # bin counts only matter (and are only shown) for the rqspline estimators
    suffix += str(args.n_bins_particles) if hasattr(args, "forward") and hasattr(args, "n_bins_particles") and args.forward == "rqspline" else ""
    suffix += "-" if suffix else ""
    suffix += args.inverse if hasattr(args, "inverse") else ""
    suffix += str(args.n_bins_data) if hasattr(args, "inverse") and hasattr(args, "n_bins_data") and args.inverse == "rqspline" else ""
    suffix += "-" if suffix else ""
    suffix += args.downsample if hasattr(args, "downsample") else ""
    suffix += "-" if suffix else ""
    # experiment name wrapped in {{...}} for easy visual scanning
    suffix += "{{" + (str(args.expname if args.expname else "debug") if hasattr(args, "expname") else "") + "}}"
    suffix += "-hd" + str(args.hdim) if hasattr(args, "hdim") else ""
    suffix += "-hdc" + str(args.hdim_per_conv) if hasattr(args, "hdim_per_conv") else ""
    suffix += "-lst" + str(args.layer_steps) if hasattr(args, "layer_steps") else ""
    suffix += "-lr" + str(args.step_size) if hasattr(args, "step_size") else ""
    suffix += "-std" + str(args.init_std) if hasattr(args, "init_std") else ""
    suffix += "-np" + str(args.n_batched_particles) if hasattr(args, "n_batched_particles") else ""
    suffix += "-xi" + str(args.amplifier) if hasattr(args, "amplifier") and hasattr(args, "cond") and args.cond else ""
    suffix += "-seed" + str(args.seed) if hasattr(args, "seed") else ""
    # Determine prefix
    prefix = time.strftime("%Y-%m-%d--%H-%M")
    prefix_counter = 0
    dirname = dirname_base + "/%s.%s" % (prefix, suffix)
    # Retry with a "+<n>" disambiguator if the directory already exists
    # (same minute, same config); give up after 10 collisions.
    while True:
        try:
            os.makedirs(dirname)
            os.makedirs(os.path.join(dirname, "code"))
            os.makedirs(os.path.join(dirname, "images"))
            os.makedirs(os.path.join(dirname, "videos"))
            os.makedirs(os.path.join(dirname, "particles"))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise e
            prefix_counter += 1
            dirname = dirname_base + "/%s+%d.%s" % (prefix, prefix_counter, suffix)
            if prefix_counter >= 10:
                exit()
        else:
            break
    formatter = logging.Formatter(FORMAT, DATEFMT)
    logger_fname = os.path.join(dirname, "logfile.txt")
    fh = logging.FileHandler(logger_fname)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # logger.propagate = False
    args.dirname = dirname
    return logger, dirname
def find_neighbors(x, data, k=10):
    """Stack *x* on top of its *k* nearest columns of *data*.

    Generalized from a hard-coded k=10 to a `k` parameter (default 10, so
    existing callers are unaffected).

    Args:
        x: query vector of shape (dim,).
        data: candidate points as columns, shape (dim, n) with n >= k.
        k: number of nearest neighbours to return.

    Returns:
        Array of shape (k + 1, dim): the query followed by its neighbours,
        ordered nearest-first by squared Euclidean distance.
    """
    data_sqnorm = jnp.sum(jnp.square(data), axis=0)
    # squared distances via ||x||^2 + ||d||^2 - 2 x.d; the constant ||x||^2
    # does not change the ranking but keeps the values interpretable
    sqdist = jnp.sum(jnp.square(x)) + data_sqnorm - 2 * jnp.matmul(x, data)
    _, idx = jax.lax.top_k(-sqdist, k)
    return jnp.vstack([x, data[:, idx].T])
| 4,759 | 30.111111 | 143 | py |
Conditionial-SWF | Conditionial-SWF-main/dataset.py | import numpy as np
import torch
import torchvision
def mnist():
    """Return flattened MNIST train/test images in [-1, 1) plus one-hot labels."""
    train_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
    test_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
    train_labels = torch.nn.functional.one_hot(train_set.targets, num_classes=10).float().numpy()
    test_labels = torch.nn.functional.one_hot(test_set.targets, num_classes=10).float().numpy()
    # pixels are 0..255; dividing by 256 then mapping to [-1, 1) matches the
    # dequantization convention used elsewhere in the code
    train_x = (train_set.data.float() / 256.0).flatten(1).numpy() * 2.0 - 1.0
    test_x = (test_set.data.float() / 256.0).flatten(1).numpy() * 2.0 - 1.0
    return train_x, test_x, train_labels, test_labels
def fashionmnist():
    """Return flattened FashionMNIST train/test images in [-1, 1) plus one-hot labels."""
    train_set = torchvision.datasets.FashionMNIST(root="./data", train=True, download=True)
    test_set = torchvision.datasets.FashionMNIST(root="./data", train=False, download=True)
    train_labels = torch.nn.functional.one_hot(train_set.targets, num_classes=10).float().numpy()
    test_labels = torch.nn.functional.one_hot(test_set.targets, num_classes=10).float().numpy()
    # same normalization convention as `mnist`: /256 then scale to [-1, 1)
    train_x = (train_set.data.float() / 256.0).flatten(1).numpy() * 2.0 - 1.0
    test_x = (test_set.data.float() / 256.0).flatten(1).numpy() * 2.0 - 1.0
    return train_x, test_x, train_labels, test_labels
def cifar10(flip=True):
    """Return flattened CIFAR-10 train/test images in [-1, 1) plus one-hot labels.

    When `flip` is True the training set is doubled with horizontally
    mirrored copies (labels duplicated accordingly).
    """
    train_set = torchvision.datasets.CIFAR10(root="./data", train=True, download=True)
    test_set = torchvision.datasets.CIFAR10(root="./data", train=False, download=True)
    # HWC uint8 -> CHW float32
    train_x = np.moveaxis(train_set.data.astype(np.float32), 3, 1)
    test_x = np.moveaxis(test_set.data.astype(np.float32), 3, 1)
    identity = np.eye(10)
    train_y = identity[train_set.targets]
    test_y = identity[test_set.targets]
    if flip:
        # horizontal mirror along the width axis, appended after the originals
        train_x = np.concatenate([train_x, train_x[:, :, :, ::-1]], axis=0)
        train_y = np.concatenate([train_y, train_y], axis=0)
    train_x = (train_x / 256.0).reshape(train_x.shape[0], -1) * 2.0 - 1.0
    test_x = (test_x / 256.0).reshape(test_x.shape[0], -1) * 2.0 - 1.0
    return train_x, test_x, train_y, test_y
def get_dataset(name):
    """Load a dataset and return `(train_x, test_x, train_y, test_y, shape)`.

    Features come back transposed (one sample per column) and marked
    read-only; both splits are shuffled independently. Raises
    NotImplementedError for an unknown `name`.
    """
    if name == "mnist":
        train_x, test_x, train_y, test_y = mnist()
        data_shape = (1, 28, 28)
    elif name == "fashion":
        train_x, test_x, train_y, test_y = fashionmnist()
        data_shape = (1, 28, 28)
    elif name == "cifar10":
        train_x, test_x, train_y, test_y = cifar10()
        data_shape = (3, 32, 32)
    elif name == "celeba":
        # stored as floats in [0, 1); rescale onto the shared [-1, 1) range
        train_x = np.load("./data/celeba_train.npy") * 255.0 / 256.0 * 2.0 - 1.0
        test_x = np.load("./data/celeba_eval.npy") * 255.0 / 256.0 * 2.0 - 1.0
        data_shape = (3, 64, 64)
        # celeba is unlabeled: dummy single-column labels keep shapes uniform
        train_y = np.zeros((train_x.shape[0], 1))
        test_y = np.zeros((test_x.shape[0], 1))
    else:
        raise NotImplementedError(f"Dataset {name} unknown.")
    # shuffle train first, then test — the order of the two RNG draws is kept
    # identical to the original for seed-for-seed reproducibility
    train_order = np.random.permutation(train_x.shape[0])
    train_x, train_y = train_x[train_order], train_y[train_order]
    test_order = np.random.permutation(test_x.shape[0])
    test_x, test_y = test_x[test_order], test_y[test_order]
    assert train_x.shape[0] == train_y.shape[0]
    assert test_x.shape[0] == test_y.shape[0]
    assert train_x.shape[1] == test_x.shape[1]
    assert train_y.shape[1] == test_y.shape[1]
    for arr in (train_x, train_y, test_x, test_y):
        arr.flags.writeable = False
    return train_x.T, test_x.T, train_y.T, test_y.T, data_shape
| 2,912 | 31.730337 | 84 | py |
Conditionial-SWF | Conditionial-SWF-main/slicers.py | import jax
import jax.numpy as jnp
import numpy as np
def uniform(key, dim, hdim, **kwargs):
    """Draw `hdim` isotropic random directions in R^dim, each unit-normalized."""
    directions = jax.random.normal(key, shape=(hdim, dim))
    norms = jnp.linalg.norm(directions, axis=1, keepdims=True)
    return directions / norms
def conv(key, input_shape, hdim, n_filters, kernel_sizes, strides=1, paddings="SAME", dilations=1, normalize=True, **kwargs):
    """Random convolutional slicer: projection directions from a random conv stack.

    Scalar n_filters/strides/paddings/dilations are broadcast to one value
    per kernel size. Kernel weights are i.i.d. normal scaled by 0.1. The
    flattened directions are the rows of the Jacobian of the conv stack,
    recovered via vector-Jacobian products against one-hot output vectors.

    Args:
        key: PRNG key.
        input_shape: (channels, height, width) of the sliced images.
        hdim: number of directions to keep; None keeps every conv output.
            A random subset of outputs is chosen when there are more than hdim.
        normalize: L2-normalize each returned direction when True.

    Returns:
        Array of shape (hdim, prod(input_shape)).
    """
    kernel_sizes = kernel_sizes if isinstance(kernel_sizes, (list, tuple)) else (kernel_sizes,)
    n_filters = n_filters if isinstance(n_filters, (list, tuple)) else (n_filters,) * len(kernel_sizes)
    strides = strides if isinstance(strides, (list, tuple)) else (strides,) * len(kernel_sizes)
    paddings = paddings if isinstance(paddings, (list, tuple)) else (paddings,) * len(kernel_sizes)
    dilations = dilations if isinstance(dilations, (list, tuple)) else (dilations,) * len(kernel_sizes)
    assert len(n_filters) == len(kernel_sizes) == len(strides) == len(paddings) == len(dilations)
    n_convs = len(n_filters)
    # prepend input channels so conv i maps n_filters[i] -> n_filters[i+1]
    # NOTE(review): assumes n_filters is a tuple at this point — an explicit
    # list argument would fail on tuple concatenation; confirm callers
    n_filters = (input_shape[0],) + n_filters
    kernels = []
    for i in range(n_convs):
        key, subkey = jax.random.split(key)
        kernels.append(jax.random.normal(subkey, shape=(n_filters[i + 1], n_filters[i], kernel_sizes[i], kernel_sizes[i])) * 0.1)
        # kernels.append(jax.random.laplace(subkey, shape=(n_filters[i + 1], n_filters[i], kernel_sizes[i], kernel_sizes[i])) * 0.1)
    # we obtain the equivalent projections through vjp through the forward mapping
    def f(x):
        for i in range(n_convs):
            stride = (strides[i],) * 2
            padding = paddings[i] if isinstance(paddings[i], str) else (paddings[i],) * 2
            dilation = (dilations[i],) * 2
            x = jax.lax.conv_general_dilated(x, kernels[i], window_strides=stride, padding=padding, rhs_dilation=dilation)
        return x
    x_dummy = jnp.zeros((1, *input_shape))
    f_value, f_vjp = jax.vjp(f, x_dummy)
    outdim = np.prod(f_value.shape)
    hdim = outdim if hdim is None else hdim
    assert outdim >= hdim
    if outdim > hdim:
        # keep a uniformly random subset of the conv outputs
        key, subkey = jax.random.split(key)
        perm = jax.random.permutation(subkey, outdim)
        I = jax.nn.one_hot(perm[:hdim], outdim)
    else:
        I = jnp.eye(outdim)
    # each row of I selects one output unit; its vjp is that unit's receptive field
    def wi(v):
        return f_vjp(v.reshape(f_value.shape))[0]
    w = jax.vmap(wi, in_axes=0)(I)
    w = jnp.reshape(w, (w.shape[0], np.prod(input_shape)))
    if normalize:
        w_norm = jnp.linalg.norm(w, axis=1, keepdims=True)
        w = w / w_norm
    return w
# a wrapper function to obtain the upsampled projection from lower resolutions
def downsample_slicer(key, slice_fn, input_shape, down_size, **kwargs):
    """Build projections at a lower spatial resolution, then upsample them.

    `slice_fn` is invoked on the downsampled shape; the resulting directions
    are resized back to `input_shape` (interpolation defaults to "lanczos3",
    override via `method=` in **kwargs) and re-normalized to unit length.
    """
    low_res_shape = (input_shape[0], *down_size)
    kwargs["dim"] = np.prod(low_res_shape)
    kwargs["input_shape"] = low_res_shape
    low_res_w = slice_fn(key, **kwargs)
    low_res_w = jnp.reshape(low_res_w, (low_res_w.shape[0], *low_res_shape))
    method = kwargs.get("method", "lanczos3")
    full_w = jax.image.resize(low_res_w, (low_res_w.shape[0], *input_shape), method=method)
    full_w = jnp.reshape(full_w, (full_w.shape[0], np.prod(input_shape)))
    return full_w / jnp.linalg.norm(full_w, axis=1, keepdims=True)
| 3,025 | 37.303797 | 128 | py |
Conditionial-SWF | Conditionial-SWF-main/layers.py | import functools
import jax
import jax.numpy as jnp
import jax.scipy
def sorting_forward(xs, x):
    """Empirical CDF of the sorted sample `xs`, evaluated at `x`.

    Linearly interpolates between adjacent order statistics and centers
    each sample at (rank + 0.5) / n; queries outside the sample range clip
    the interpolation fraction to [0, 1].
    """
    n = xs.shape[0]
    pos = jnp.searchsorted(xs, x)
    lo = jnp.clip(pos - 1, 0, n - 1)
    hi = jnp.clip(pos, 0, n - 1)
    gap = xs[hi] - xs[lo]
    # nan_to_num guards the degenerate 0/0 case of identical neighbors
    frac = jnp.clip(jnp.nan_to_num((x - xs[lo]) / gap), 0.0, 1.0)
    return (lo + frac + 0.5) / n
def sorting_inverse(ys, cdf):
    """Empirical quantile function of the sorted sample `ys` at level `cdf`."""
    n = ys.shape[0]
    rank = jnp.clip(jnp.int32(jnp.floor(cdf * n)), 0, n - 1)
    nxt = jnp.clip(rank + 1, 0, n - 1)
    # linear interpolation within the bin [rank/n, (rank+1)/n)
    return ys[rank] + (cdf - rank / n) * (ys[nxt] - ys[rank])
def rq_spline_compute_shared(bin_widths, bin_heights, knot_slopes, x_or_y, range_min=0.0, is_x=True):
    """Captures shared computations across the rational quadratic spline forward/inverse.

    Knot x/y coordinates are cumulative sums of the bin widths/heights
    starting at `range_min`; the boundary knot slopes are fixed to 1 so the
    spline continues as the identity outside its range.

    Returns:
        (out_of_bounds, x_k, y_k, d_k, d_kp1, h_k, w_k, s_k): the
        out-of-range mask, the left-knot coordinates and slope of the bin
        containing each query, the right-knot slope, and the bin height,
        width, and secant slope h_k / w_k.
    """
    assert bin_widths.ndim == bin_heights.ndim == knot_slopes.ndim == 1
    kx = jnp.concatenate([jnp.full((1,), range_min), jnp.cumsum(bin_widths, axis=-1) + range_min], axis=-1)
    ky = jnp.concatenate([jnp.full((1,), range_min), jnp.cumsum(bin_heights, axis=-1) + range_min], axis=-1)
    kd = jnp.concatenate([jnp.full((1,), 1.0), knot_slopes, jnp.full((1,), 1.0)], axis=-1)
    # bins are searched along x for the forward pass, along y for the inverse
    kx_or_ky = kx if is_x else ky
    kx_or_ky_min = kx_or_ky[0]
    kx_or_ky_max = kx_or_ky[-1]
    out_of_bounds = (x_or_y <= kx_or_ky_min) | (x_or_y >= kx_or_ky_max)
    # clamp out-of-range queries to a valid bin; callers restore them via the mask
    x_or_y = jnp.where(out_of_bounds, kx_or_ky_min, x_or_y)
    idx = jnp.clip(jnp.searchsorted(kx_or_ky, x_or_y) - 1, 0, kx_or_ky.shape[0] - 2)
    x_k = kx[idx]
    x_kp1 = kx[idx + 1]
    y_k = ky[idx]
    y_kp1 = ky[idx + 1]
    d_k = kd[idx]
    d_kp1 = kd[idx + 1]
    h_k = y_kp1 - y_k
    w_k = x_kp1 - x_k
    s_k = h_k / w_k
    return out_of_bounds, x_k, y_k, d_k, d_kp1, h_k, w_k, s_k
def rq_spline_forward(bin_widths, bin_heights, knot_slopes, x, range_min=0.0):
    """Compute the rational quadratic spline forward transformation.

    Monotone piecewise map y = f(x); queries outside the knot range pass
    through unchanged (identity tails, boundary slopes are 1).
    """
    out_of_bounds, x_k, y_k, d_k, d_kp1, h_k, w_k, s_k = rq_spline_compute_shared(bin_widths, bin_heights, knot_slopes, x, range_min=range_min, is_x=True)
    # relative position of x inside its bin, in [0, 1]
    relx = (x - x_k) / w_k
    spline_val = y_k + ((h_k * (s_k * relx**2 + d_k * relx * (1 - relx))) / (s_k + (d_kp1 + d_k - 2 * s_k) * relx * (1 - relx)))
    y_val = jnp.where(out_of_bounds, x, spline_val)
    return y_val
def rq_spline_inverse(bin_widths, bin_heights, knot_slopes, y, range_min=0.0):
    """Compute the rational quadratic spline inverse transformation.

    Solves the per-bin quadratic in the relative x position; out-of-range
    queries pass through unchanged, mirroring the forward identity tails.
    """
    out_of_bounds, x_k, y_k, d_k, d_kp1, h_k, w_k, s_k = rq_spline_compute_shared(bin_widths, bin_heights, knot_slopes, y, range_min=range_min, is_x=False)
    rely = jnp.where(out_of_bounds, 0.0, y - y_k)
    term2 = rely * (d_kp1 + d_k - 2 * s_k)
    # These terms are the a, b, c terms of the quadratic formula.
    a = h_k * (s_k - d_k) + term2
    b = h_k * d_k - term2
    c = -s_k * rely
    # The expression used here has better numerical behavior for small 4*a*c.
    relx = jnp.where((rely == 0.0), 0.0, (2 * c) / (-b - jnp.sqrt(b**2 - 4 * a * c)))
    return jnp.where(out_of_bounds, y, relx * w_k + x_k)
def layer(wkey, dkey, data_train, x_batched, x_offline, slicer_dict, dim, hdim, mask=None, step_size=1.0, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, dequantize=True, multi_devices=True, clip=None, fix_slopes=False):
    """One sliced-Wasserstein-flow layer.

    Projects particles and data onto `hdim` random directions, transports
    each 1-D particle projection onto the matching 1-D data projection
    (forward CDF followed by inverse data CDF), and moves the particles
    along the back-projected displacement, scaled by `step_size * dim / hdim`.

    Args:
        wkey, dkey: PRNG keys for the slicers and for dequantization noise.
        data_train: target data, one sample per column; rows beyond `dim`
            are treated as label dimensions (conditional case).
        x_batched, x_offline: particle sets, one particle per column.
        slicer_dict: maps slicer function -> number of PRNG keys to vmap it over.
        dim: image dimensionality.
        hdim: number of projection directions to keep.
        mask: optional multiplicative mask applied to the particle update.
        forward, inverse: "sorting" (empirical CDF) or "rqspline"
            (quantile-binned rational-quadratic spline) CDF estimators.
        multi_devices: reduce statistics across pmap devices when True
            (the function is then expected to run under
            `jax.pmap(..., axis_name="device")`).
        clip: optional symmetric clipping bound for updated particles.
        fix_slopes: replace spline knot slopes with ones.

    Returns:
        (x_batched, x_offline, ws_dist_batched, ws_dist_offline,
        x_batched_to_save): updated particle sets, mean 1-D transport
        distances for both sets, and a per-device slice of the batched
        particles kept for storage.
    """
    assert isinstance(slicer_dict, dict)
    # draw the requested number of projection directions from every slicer
    ws = []
    for slicer, num in slicer_dict.items():
        wkey, subkey = jax.random.split(wkey)
        skeys = jax.random.split(subkey, num)
        wi = jax.vmap(slicer)(skeys)
        ws.append(jnp.reshape(wi, (-1, dim)))
        print(f"Slicer {slicer}: hdim = {ws[-1].shape[0]}")
    w = jnp.vstack(ws)
    print(f"Image slicer shape = {w.shape}")
    if w.shape[0] > hdim:
        # more directions than requested: keep a random subset of size hdim
        wkey, subkey = jax.random.split(wkey)
        w = jax.random.choice(subkey, w, (hdim,), replace=False)
        print(f"[After subsampling] Slicer shape = {w.shape}")
    assert w.shape[0] == hdim
    # generate projections for labels
    if data_train.shape[0] > dim:
        n_labels = data_train.shape[0] - dim
        wkey, subkey = jax.random.split(wkey)
        w_labels = jax.random.laplace(subkey, shape=(hdim, n_labels))
        w_labels_norm = jnp.linalg.norm(w_labels, axis=1, keepdims=True)
        w_labels = w_labels / w_labels_norm
        print(f"Label slicer shape = {w_labels.shape}")
        # rescale image/label parts by sqrt of their dimension fractions so
        # the concatenated direction keeps a comparable overall scale
        w = jnp.concatenate([w * jnp.sqrt(dim / data_train.shape[0]), w_labels * jnp.sqrt(n_labels / data_train.shape[0])], axis=1)
        print(f"Final slicer shape = {w.shape}")
    # compute projection
    x_proj = jnp.matmul(w, x_batched)
    if dequantize:
        data_train = data_train + jax.random.uniform(dkey, data_train.shape) / 128.0
    data_proj = jnp.matmul(w, data_train)
    # prepare forward and inverse functions
    if forward == "rqspline":
        # normalize projections to [0, 1] and fit a histogram spline whose
        # knots sit at empirical quantiles of the particle projections
        x_proj = jnp.sort(x_proj)
        x_min = x_proj[:, :1]
        x_max = x_proj[:, -1:]
        if multi_devices:
            x_min = jax.lax.pmin(x_min, axis_name="device")
            x_max = jax.lax.pmax(x_max, axis_name="device")
        x_proj = (x_proj - x_min) / (x_max - x_min)
        bin_edges_idx_x = jnp.int32(jnp.linspace(0.0, 1.0, num=n_bins_particles + 1)[1:-1] * x_proj.shape[-1])
        bin_edges_x = x_proj[:, bin_edges_idx_x]
        if multi_devices:
            bin_edges_x = jax.lax.pmean(bin_edges_x, axis_name="device")
        bin_edges_x = jnp.concatenate([jnp.full(bin_edges_x.shape[:-1] + (1,), 0.0), bin_edges_x, jnp.full(bin_edges_x.shape[:-1] + (1,), 1.0)], axis=-1)
        hist_x, _ = jax.vmap(functools.partial(jnp.histogram, range=(0.0, 1.0), density=False))(x_proj, bin_edges_x)
        hist_x = hist_x / x_proj.shape[1]
        if multi_devices:
            hist_x = jax.lax.pmean(hist_x, axis_name="device")
        bin_widths_x = bin_edges_x[:, 1:] - bin_edges_x[:, :-1]
        knot_slopes_x = (hist_x[:, :-1] + hist_x[:, 1:]) / (bin_widths_x[:, :-1] + bin_widths_x[:, 1:])
        if fix_slopes:
            knot_slopes_x = jnp.ones_like(knot_slopes_x)
    if forward == "sorting":
        if multi_devices:
            # gather every device's particles so the empirical CDF is global
            x_proj = jax.lax.all_gather(x_proj, axis_name="device", axis=1)
            x_proj = jnp.reshape(x_proj, (hdim, -1))
        x_proj = jnp.sort(x_proj)
    if inverse == "rqspline":
        # same spline construction as above, on the data projections
        data_proj = jnp.sort(data_proj)
        data_min = data_proj[:, :1]
        data_max = data_proj[:, -1:]
        if multi_devices:
            data_min = jax.lax.pmin(data_min, axis_name="device")
            data_max = jax.lax.pmax(data_max, axis_name="device")
        data_proj = (data_proj - data_min) / (data_max - data_min)
        bin_edges_idx_data = jnp.int32(jnp.linspace(0.0, 1.0, num=n_bins_data + 1)[1:-1] * data_proj.shape[-1])
        bin_edges_data = data_proj[:, bin_edges_idx_data]
        if multi_devices:
            bin_edges_data = jax.lax.pmean(bin_edges_data, axis_name="device")
        bin_edges_data = jnp.concatenate([jnp.full(bin_edges_data.shape[:-1] + (1,), 0.0), bin_edges_data, jnp.full(bin_edges_data.shape[:-1] + (1,), 1.0)], axis=-1)
        hist_data, _ = jax.vmap(functools.partial(jnp.histogram, range=(0.0, 1.0), density=False))(data_proj, bin_edges_data)
        hist_data = hist_data / data_proj.shape[1]
        if multi_devices:
            hist_data = jax.lax.pmean(hist_data, axis_name="device")
        bin_widths_data = bin_edges_data[:, 1:] - bin_edges_data[:, :-1]
        knot_slopes_data = (hist_data[:, :-1] + hist_data[:, 1:]) / (bin_widths_data[:, :-1] + bin_widths_data[:, 1:])
        if fix_slopes:
            knot_slopes_data = jnp.ones_like(knot_slopes_data)
    if inverse == "sorting":
        if multi_devices:
            data_proj = jax.lax.all_gather(data_proj, axis_name="device", axis=1)
            data_proj = jnp.reshape(data_proj, (hdim, -1))
        data_proj = jnp.sort(data_proj)
    # prepare unidimensional optimal transport functions
    if forward == "sorting" and inverse == "sorting":
        def unidim_transport(xs, ys, x):
            return sorting_inverse(ys, sorting_forward(xs, x))
    elif forward == "sorting" and inverse == "rqspline":
        def unidim_transport(xs, bin_widths_data, bin_heights_data, knot_slopes_data, x):
            return rq_spline_inverse(bin_widths_data, bin_heights_data, knot_slopes_data, sorting_forward(xs, x))
    elif forward == "rqspline" and inverse == "sorting":
        def unidim_transport(bin_widths_x, bin_heights_x, knot_slopes_x, ys, x):
            return sorting_inverse(ys, rq_spline_forward(bin_widths_x, bin_heights_x, knot_slopes_x, x))
    elif forward == "rqspline" and inverse == "rqspline":
        def unidim_transport(bin_widths_x, bin_heights_x, knot_slopes_x, bin_widths_data, bin_heights_data, knot_slopes_data, x):
            return rq_spline_inverse(bin_widths_data, bin_heights_data, knot_slopes_data, rq_spline_forward(bin_widths_x, bin_heights_x, knot_slopes_x, x))
    else:
        raise NotImplementedError(f"forward method {forward} or inverse method {inverse} unknown.")
    print(f"Forward method: {forward}, Inverse method: {inverse}")
    def transport(x):
        # project, transport every 1-D marginal, and back-project the movement
        y = jnp.matmul(w, x)
        if forward == "sorting" and inverse == "sorting":
            a = jax.vmap(unidim_transport)(x_proj, data_proj, y)
        elif forward == "sorting" and inverse == "rqspline":
            a_normalized = jax.vmap(unidim_transport)(x_proj, bin_widths_data, hist_data, knot_slopes_data, y)
            a = a_normalized * (data_max - data_min) + data_min
        elif forward == "rqspline" and inverse == "sorting":
            y_normalized = (y - x_min) / (x_max - x_min)
            a = jax.vmap(unidim_transport)(bin_widths_x, hist_x, knot_slopes_x, data_proj, y_normalized)
        elif forward == "rqspline" and inverse == "rqspline":
            y_normalized = (y - x_min) / (x_max - x_min)
            a_normalized = jax.vmap(unidim_transport)(bin_widths_x, hist_x, knot_slopes_x, bin_widths_data, hist_data, knot_slopes_data, y_normalized)
            a = a_normalized * (data_max - data_min) + data_min
        movement = a - y
        delta_x = jnp.matmul(w.T, movement) * (step_size * dim / hdim)
        if mask is not None:
            z = x + delta_x * mask
        else:
            z = x + delta_x
        if clip is not None:
            print(f"Enabled data clipping = {clip}")
            z = jnp.clip(z, -clip, clip)
        ws_dist = jnp.mean(jnp.abs(movement))
        return z, ws_dist
    x_batched, ws_dist_batched = transport(x_batched)
    x_offline, ws_dist_offline = transport(x_offline)
    # a workaround to prevent copying all particles to device 0 for storage
    n_save_device = 50000 // jax.device_count()
    x_batched_to_save = x_batched[:, :n_save_device]
    return x_batched, x_offline, ws_dist_batched, ws_dist_offline, x_batched_to_save
| 10,259 | 43.034335 | 251 | py |
Conditionial-SWF | Conditionial-SWF-main/models.py | import functools
import jax
import numpy as np
import layers
import slicers
# Default number of filters used by every convolutional slicer stack below.
nfs = 20
def downsample_kxk_dense_layer(layer, data_shape, k, hdim, step_size=1.0, method="lanczos3"):
    """pmapped SWF layer whose dense projections live on a k x k downsampled grid."""
    low_res = (k, k)
    low_res_dim = np.prod(low_res)
    slicer = functools.partial(
        slicers.downsample_slicer,
        slice_fn=slicers.uniform,
        input_shape=data_shape,
        down_size=low_res,
        hdim=hdim,
        method=method,
    )
    # cap the step by the projections-per-dimension ratio, then rescale by the
    # fraction of pixels the low-resolution grid covers
    effective_step = np.minimum(step_size, hdim / low_res_dim / data_shape[0])
    effective_step = effective_step * (low_res_dim / np.prod(data_shape[1:]))
    return jax.pmap(functools.partial(layer, slicer_dict={slicer: 1}, step_size=effective_step), axis_name="device", in_axes=(None, 0, 0, 0, 0))
def downsample_kxk_conv_layer(layer, data_shape, k, hdim, hdim_per_conv, n_filters, kernel_sizes, strides=1, paddings="SAME", dilations=1, step_size=1.0, method="lanczos3"):
    """pmapped SWF layer with convolutional projections on a k x k downsampled grid.

    The conv slicer is replicated `hdim // hdim_per_conv` times so the layer
    still uses `hdim` directions in total.
    """
    low_res = (k, k)
    low_res_dim = np.prod(low_res)
    slicer = functools.partial(
        slicers.downsample_slicer,
        slice_fn=slicers.conv,
        input_shape=data_shape,
        down_size=low_res,
        hdim=hdim_per_conv,
        n_filters=n_filters,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        dilations=dilations,
        method=method,
    )
    # same step-size convention as the dense variant: cap, then rescale by
    # the covered pixel fraction
    effective_step = np.minimum(step_size, hdim / low_res_dim / data_shape[0])
    effective_step = effective_step * (low_res_dim / np.prod(data_shape[1:]))
    return jax.pmap(functools.partial(layer, slicer_dict={slicer: hdim // hdim_per_conv}, step_size=effective_step), axis_name="device", in_axes=(None, 0, 0, 0, 0))
def low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method="lanczos3", rezs=(1, 2, 3, 4, 5, 6), steps=(10, 100, 200, 300, 300, 300)):
    """One dense downsampled layer per resolution in `rezs`, with matching step counts."""
    assert len(rezs) == len(steps)
    transform_layers = [
        downsample_kxk_dense_layer(layer=layer, data_shape=data_shape, k=rez, hdim=hdim, step_size=step_size, method=downsample_method)
        for rez in rezs
    ]
    transform_steps = list(steps)
    assert len(transform_layers) == len(transform_steps)
    return transform_layers, transform_steps
def downsample_kxk_model(layer, data_shape, k, hdim, hdim_per_conv, step_size, nfs, kss, sts, pds, dls, steps, min_convs=1, downsample_method="lanczos3", init_dense=False):
    """Stack of conv slicer layers on a k x k downsampled grid, optionally
    preceded by one dense layer.

    Entry i of nfs/kss/sts/pds/dls may be a scalar (broadcast over a conv
    stack of depth nl - i + min_convs - 1) or an explicit list/tuple.
    """
    assert len(nfs) == len(kss) == len(sts) == len(pds) == len(dls)
    assert len(steps) == len(nfs) + (1 if init_dense else 0)
    assert min_convs >= 1
    nl = len(nfs)
    def broadcast(spec, depth):
        # scalars are repeated over the conv stack; sequences pass through
        return spec if isinstance(spec, (list, tuple)) else (spec,) * depth
    transform_layers, transform_steps = [], []
    if init_dense:
        transform_layers.append(downsample_kxk_dense_layer(layer=layer, data_shape=data_shape, k=k, hdim=hdim, step_size=step_size, method=downsample_method))
        transform_steps.append(steps[0])
        steps = steps[1:]
    for i in range(nl):
        depth = nl - i + min_convs - 1
        conv_layer = downsample_kxk_conv_layer(
            layer=layer,
            data_shape=data_shape,
            k=k,
            hdim=hdim,
            hdim_per_conv=hdim_per_conv,
            n_filters=broadcast(nfs[i], depth),
            kernel_sizes=broadcast(kss[i], depth),
            strides=broadcast(sts[i], depth),
            paddings=broadcast(pds[i], depth),
            dilations=broadcast(dls[i], depth),
            step_size=step_size,
            method=downsample_method,
        )
        transform_layers.append(conv_layer)
        transform_steps.append(steps[i])
    assert len(transform_layers) == len(transform_steps)
    return transform_layers, transform_steps
def kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs, kss, sts, pds, dls, steps, min_convs=1, init_dense=False):
    """Full-resolution stack of conv slicer layers, optionally preceded by a
    dense layer.

    Entry i of nfs/kss/sts/pds/dls may be a scalar (broadcast over a conv
    stack of depth nl - i + min_convs - 1) or an explicit list/tuple.
    """
    assert len(nfs) == len(kss) == len(sts) == len(pds) == len(dls)
    assert len(steps) == len(nfs) + (1 if init_dense else 0)
    assert min_convs >= 1
    nl = len(nfs)
    dim = np.prod(data_shape)
    # cap the step size by the projections-per-dimension ratio
    effective_step = np.minimum(step_size, hdim / dim)
    def broadcast(spec, depth):
        # scalars are repeated over the conv stack; sequences pass through
        return spec if isinstance(spec, (list, tuple)) else (spec,) * depth
    transform_layers, transform_steps = [], []
    if init_dense:
        dense_slicer = functools.partial(slicers.uniform, dim=dim, hdim=hdim)
        dense_layer = jax.pmap(functools.partial(layer, slicer_dict={dense_slicer: 1}, step_size=effective_step), axis_name="device", in_axes=(None, 0, 0, 0, 0))
        transform_layers.append(dense_layer)
        transform_steps.append(steps[0])
        steps = steps[1:]
    for i in range(nl):
        depth = nl - i + min_convs - 1
        conv_slicer = functools.partial(
            slicers.conv,
            input_shape=data_shape,
            hdim=hdim_per_conv,
            n_filters=broadcast(nfs[i], depth),
            kernel_sizes=broadcast(kss[i], depth),
            strides=broadcast(sts[i], depth),
            paddings=broadcast(pds[i], depth),
            dilations=broadcast(dls[i], depth),
        )
        conv_layer = jax.pmap(functools.partial(layer, slicer_dict={conv_slicer: hdim // hdim_per_conv}, step_size=effective_step), axis_name="device", in_axes=(None, 0, 0, 0, 0))
        transform_layers.append(conv_layer)
        transform_steps.append(steps[i])
    assert len(transform_layers) == len(transform_steps)
    return transform_layers, transform_steps
def swf_model(data_shape, mask, hdim, step_size, layer_steps=200, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, dequantize=True, **kwargs):
    """Baseline SWF model: a single dense-slicer layer run for `layer_steps` steps."""
    dim = np.prod(data_shape)
    capped_step = np.minimum(step_size, hdim / dim)
    base_layer = functools.partial(layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize)
    dense_slicer = functools.partial(slicers.uniform, dim=dim, hdim=hdim)
    dense_layer = jax.pmap(functools.partial(base_layer, slicer_dict={dense_slicer: 1}, step_size=capped_step), axis_name="device", in_axes=(None, 0, 0, 0, 0))
    return [dense_layer], [layer_steps]
def mnist_model(data_shape, mask, hdim, hdim_per_conv, step_size, layer_steps=200, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, downsample_method="lanczos3", dequantize=True):
    """Pyramidal layer schedule for 28x28 images (MNIST / FashionMNIST).

    Builds a coarse-to-fine sequence: dense layers at resolutions 1..6, then
    alternating dilated and plain conv-slicer stacks at 7x7, 11x11, 14x14,
    21x21, and finally full 28x28 resolution. Returns parallel lists
    (transform_layers, transform_steps) consumed by the training loop.
    """
    dim = np.prod(data_shape)
    layer = functools.partial(layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize)
    transform_layers, transform_steps = [], []
    # phase 1: dense layers at resolutions 1x1 .. 6x6 (short warm-up at 1x1)
    lowres_layers, lowres_steps = low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method, rezs=list(range(1, 7)), steps=[20] + [layer_steps] * 5)
    transform_layers.extend(lowres_layers)
    transform_steps.extend(lowres_steps)
    # phase 2: 7x7 grid — dilated convs first, then a dense+plain-conv stack
    res_7x7_dl2_layers, res_7x7_dl2_steps = downsample_kxk_model(layer, data_shape, 7, hdim, hdim_per_conv, step_size, nfs=[nfs] * 2, kss=[3] * 2, sts=[1] * 2, pds=["SAME"] * 2, dls=[2] * 2, steps=[layer_steps] * 2, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_7x7_dl2_layers)
    transform_steps.extend(res_7x7_dl2_steps)
    res_7x7_layers, res_7x7_steps = downsample_kxk_model(layer, data_shape, 7, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[1] * 3, steps=[layer_steps] * 4, min_convs=1, downsample_method=downsample_method, init_dense=True)
    transform_layers.extend(res_7x7_layers)
    transform_steps.extend(res_7x7_steps)
    # phase 3: 11x11 grid — dilated then plain conv stacks
    res_11x11_dl2_layers, res_11x11_dl2_steps = downsample_kxk_model(layer, data_shape, 11, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_11x11_dl2_layers)
    transform_steps.extend(res_11x11_dl2_steps)
    res_11x11_layers, res_11x11_steps = downsample_kxk_model(layer, data_shape, 11, hdim, hdim_per_conv, step_size, nfs=[nfs] * 5, kss=[3] * 5, sts=[1] * 5, pds=["SAME"] * 5, dls=[1] * 5, steps=[layer_steps] * 5, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_11x11_layers)
    transform_steps.extend(res_11x11_steps)
    # phase 4: 14x14 grid — dilated then plain conv stacks
    res_14x14_dl2_layers, res_14x14_dl2_steps = downsample_kxk_model(layer, data_shape, 14, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_14x14_dl2_layers)
    transform_steps.extend(res_14x14_dl2_steps)
    res_14x14_layers, res_14x14_steps = downsample_kxk_model(layer, data_shape, 14, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_14x14_layers)
    transform_steps.extend(res_14x14_steps)
    # phase 5: 21x21 grid — dilated then plain conv stacks
    res_21x21_dl2_layers, res_21x21_dl2_steps = downsample_kxk_model(layer, data_shape, 21, hdim, hdim_per_conv, step_size, nfs=[nfs] * 4, kss=[3] * 4, sts=[1] * 4, pds=["SAME"] * 4, dls=[2] * 4, steps=[layer_steps] * 4, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_21x21_dl2_layers)
    transform_steps.extend(res_21x21_dl2_steps)
    res_21x21_layers, res_21x21_steps = downsample_kxk_model(layer, data_shape, 21, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_21x21_layers)
    transform_steps.extend(res_21x21_steps)
    # phase 6: full 28x28 resolution conv stack
    res_28x28_layers, res_28x28_steps = kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, init_dense=False)
    transform_layers.extend(res_28x28_layers)
    transform_steps.extend(res_28x28_steps)
    return transform_layers, transform_steps
def cifar10_model(data_shape, mask, hdim, hdim_per_conv, step_size, layer_steps=300, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, downsample_method="lanczos3", dequantize=True):
    """Assemble the CIFAR-10 transport model: dense low-resolution stages
    followed by progressively larger convolutional stages up to 32x32.

    Returns (transform_layers, transform_steps) as two parallel lists.
    """
    dim = np.prod(data_shape)
    layer = functools.partial(layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize)
    transform_layers, transform_steps = [], []

    def _add(stage):
        # Each stage-builder returns (layers, steps); accumulate both.
        stage_layers, stage_steps = stage
        transform_layers.extend(stage_layers)
        transform_steps.extend(stage_steps)

    # Dense stages on 1x1 .. 7x7 downsampled data (first stage runs few steps).
    _add(low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method, rezs=list(range(1, 8)), steps=[20] + [layer_steps] * 6))
    # Downsampled convolutional stages: (resolution, n_convs, dilation, steps, init_dense).
    # NOTE(review): `nfs` is not defined in this function; it must resolve from
    # module globals — confirm it is set before this is called.
    conv_stages = [
        (8, 2, 2, [layer_steps] * 2, False),
        (8, 3, 1, [layer_steps] * 4, True),
        (12, 3, 2, [layer_steps] * 3, False),
        (12, 5, 1, [layer_steps] * 5, False),
        (16, 3, 2, [layer_steps] * 3, False),
        (16, 7, 1, [layer_steps] * 7, False),
        (24, 7, 1, [layer_steps] * 7, False),
    ]
    for rez, n, dl, steps, init_dense in conv_stages:
        _add(downsample_kxk_model(layer, data_shape, rez, hdim, hdim_per_conv, step_size, nfs=[nfs] * n, kss=[3] * n, sts=[1] * n, pds=["SAME"] * n, dls=[dl] * n, steps=steps, min_convs=1, downsample_method=downsample_method, init_dense=init_dense))
    # Full-resolution 32x32 stage (no downsampling); last two layers train longer.
    _add(kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 5 + [1000] * 2, min_convs=1, init_dense=False))
    return transform_layers, transform_steps
def celeba_model(data_shape, mask, hdim, hdim_per_conv, step_size, layer_steps=300, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, downsample_method="lanczos3", dequantize=True):
    """Assemble the CelebA transport model: dense low-resolution stages
    followed by progressively larger convolutional stages up to 64x64.

    Returns (transform_layers, transform_steps) as two parallel lists.
    """
    dim = np.prod(data_shape)
    layer = functools.partial(layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize, clip=1.0)
    transform_layers, transform_steps = [], []

    def _add(stage):
        # Each stage-builder returns (layers, steps); accumulate both.
        stage_layers, stage_steps = stage
        transform_layers.extend(stage_layers)
        transform_steps.extend(stage_steps)

    # Dense stages on 1x1 .. 7x7 downsampled data (first stage runs few steps).
    _add(low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method, rezs=list(range(1, 8)), steps=[50] + [layer_steps] * 6))
    # Downsampled convolutional stages: (resolution, n_convs, dilation, steps, init_dense).
    # NOTE(review): `nfs` is not defined in this function; it must resolve from
    # module globals — confirm it is set before this is called.
    conv_stages = [
        (8, 2, 2, [layer_steps] * 2, False),
        (8, 3, 1, [layer_steps] * 4, True),
        (12, 3, 2, [layer_steps] * 3, False),
        (12, 5, 1, [layer_steps] * 5, False),
        (16, 3, 2, [layer_steps] * 3, False),
        (16, 7, 1, [layer_steps] * 7, False),
        (24, 7, 1, [layer_steps] * 7, False),
        (32, 7, 1, [layer_steps] * 7, False),
    ]
    for rez, n, dl, steps, init_dense in conv_stages:
        _add(downsample_kxk_model(layer, data_shape, rez, hdim, hdim_per_conv, step_size, nfs=[nfs] * n, kss=[3] * n, sts=[1] * n, pds=["SAME"] * n, dls=[dl] * n, steps=steps, min_convs=1, downsample_method=downsample_method, init_dense=init_dense))
    # Full-resolution 64x64 stage (no downsampling); last two layers train longer.
    _add(kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 5 + [1000] * 2, min_convs=1, init_dense=False))
    return transform_layers, transform_steps
| 18,274 | 60.949153 | 286 | py |
Conditionial-SWF | Conditionial-SWF-main/data/get_celebA.py | import resource
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def resize_small(image, resolution):
    """Shrink `image` so its shorter side equals `resolution`, keeping aspect ratio."""
    height, width = image.shape[0], image.shape[1]
    ratio = resolution / min(height, width)
    # int() truncates the scaled dimensions (matches the original behaviour,
    # which replaced a commented-out tf.round variant).
    new_h = int(height * ratio)
    new_w = int(width * ratio)
    return tf.image.resize(image, [new_h, new_w], antialias=True)
def central_crop(image, size):
    """Crop a centered `size` x `size` square out of `image`."""
    offset_h = (image.shape[0] - size) // 2
    offset_w = (image.shape[1] - size) // 2
    return tf.image.crop_to_bounding_box(image, offset_h, offset_w, size, size)
def get_celeba_dataset(uniform_dequantization=False):
    """Build CelebA train/validation tf.data pipelines.

    Each element is a dict {"image": float32 64x64x3 in [0, 1), "label": ...}.
    Returns (train_ds, eval_ds, dataset_builder).
    """
    dataset_builder = tfds.builder("celeb_a")
    train_split_name = "train"
    eval_split_name = "validation"
    def resize_op(img):
        # Convert to float32 in [0, 1], crop the 140x140 face region, then
        # scale the shorter side to 64 (so output is 64x64 for square crops).
        img = tf.image.convert_image_dtype(img, tf.float32)
        img = central_crop(img, 140)
        img = resize_small(img, 64)
        return img
    def preprocess_fn(d):
        """Crop/resize the image and optionally uniform-dequantize to [0, 1)."""
        img = resize_op(d["image"])
        if uniform_dequantization:
            # Add uniform noise per pixel so discrete 0..255 values fill [0, 1).
            img = (tf.random.uniform(img.shape, dtype=tf.float32) + img * 255.0) / 256.0
        # return dict(image=img, label=d.get('label', None))
        return dict(image=img, label=d.get("label", 0))
    def create_dataset(dataset_builder, split):
        # NOTE(review): `experimental_threading` options are from older TF
        # releases (renamed `threading` later) — confirm against the pinned
        # TensorFlow version.
        dataset_options = tf.data.Options()
        dataset_options.experimental_optimization.map_parallelization = True
        dataset_options.experimental_threading.private_threadpool_size = 48
        dataset_options.experimental_threading.max_intra_op_parallelism = 1
        read_config = tfds.ReadConfig(options=dataset_options)
        if isinstance(dataset_builder, tfds.core.DatasetBuilder):
            # A TFDS builder: download if needed, then materialise the split.
            dataset_builder.download_and_prepare()
            ds = dataset_builder.as_dataset(split=split, shuffle_files=True, read_config=read_config)
        else:
            # Already a tf.data.Dataset: just attach the options.
            ds = dataset_builder.with_options(dataset_options)
        # ds = ds.repeat(count=num_epochs)
        # ds = ds.shuffle(shuffle_buffer_size)
        ds = ds.map(preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        # ds = ds.batch(batch_size, drop_remainder=True)
        # return ds.prefetch(prefetch_size)
        return ds
    train_ds = create_dataset(dataset_builder, train_split_name)
    eval_ds = create_dataset(dataset_builder, eval_split_name)
    return train_ds, eval_ds, dataset_builder
train_ds, eval_ds, dataset_builder = get_celeba_dataset()

# Materialise each split as an (N, C, H, W) array (channels moved to front).
xtrain = np.stack([np.moveaxis(ex["image"], -1, 0) for ex in train_ds.as_numpy_iterator()])
xtest = np.stack([np.moveaxis(ex["image"], -1, 0) for ex in eval_ds.as_numpy_iterator()])

# Flatten every image to a row vector and save one .npy per split.
with open("celeba_train.npy", "wb") as f:
    np.save(f, xtrain.reshape(xtrain.shape[0], -1))
with open("celeba_eval.npy", "wb") as f:
    np.save(f, xtest.reshape(xtest.shape[0], -1))
blockchain-explorer | blockchain-explorer-main/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: Apache-2.0
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# General information about the project (shown in titles/footers).
project = u'Hyperledger Explorer'
copyright = u'Hyperledger Explorer Project source code is released under the Apache 2.0 license'
author = u'Hyperledger Explorer'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = [
# 'sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.todo',
# 'sphinx.ext.coverage',
# 'sphinx.ext.mathjax',
# 'sphinx.ext.ifconfig',
# 'sphinx.ext.viewcode',
# 'sphinx.ext.githubpages',
#]
# Sphinx extensions enabled for this build.
# NOTE(review): `extensions` is reassigned near the end of this file — check
# that the final value still contains the entries listed here.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.ifconfig',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.coverage',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# recommonmark is a python utility that allows markdown to be used within
# Sphinx projects.
# Installed version as per directive in docs/requirement.txt
# NOTE(review): `source_parsers` was deprecated in newer Sphinx releases —
# confirm against the pinned Sphinx version.
from recommonmark.parser import CommonMarkParser
source_parsers = {
    '.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# NOTE(review): '.xxrst' matches nothing standard — presumably a deliberate
# placeholder so nothing is excluded; confirm.
exclude_patterns = ['.xxrst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
#html_theme = 'sphinxdoc'
# html_theme = 'traditional'
# html_theme = 'bizstyle'
# html_theme = 'haiku'
# html_theme = 'nature'
# html_theme = 'pyramid'
# html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'canonical_url': '',
    # NOTE(review): placeholder tracking id — replace before enabling analytics.
    'analytics_id': 'UA-XXXXXXX-1',  # Provided by Google in your dashboard
    'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'both',
    # Toc options
    'collapse_navigation': True,
    'sticky_navigation': True,
    'navigation_depth': 4,
    'includehidden': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'HyperledgerExplorerDocument'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'HyperledgerExplorer.tex', u'Hyperledger Explorer Documentation',
     u'Explorer', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'hyperledgerexplorer', u'Hyperledger Explorer Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'HyperledgerExplorer', u'Hyperledger Explorer Documentation',
     author, 'HyperledgerExplorer', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Extension configuration -------------------------------------------------

# BUG FIX: this previously did `extensions = ['sphinxcontrib.contentui']`,
# which silently discarded every extension registered earlier in this file
# (autodoc, napoleon, viewcode, ...). Append instead of reassigning.
extensions.append('sphinxcontrib.contentui')

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 7,264 | 28.653061 | 96 | py |
DLFuzz | DLFuzz-master/ImageNet/utils_tmp.py | # -*- coding: utf-8 -*-
import random
from collections import defaultdict
import numpy as np
from datetime import datetime
from keras import backend as K
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.models import Model
from keras.preprocessing import image
model_layer_weights_top_k = []
def preprocess_image(img_path):
    """Load an image from disk as a VGG-ready batch of shape (1, 224, 224, 3)."""
    loaded = image.load_img(img_path, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(loaded), axis=0)
    # Apply the Keras VGG preprocessing (BGR reorder + mean subtraction).
    return preprocess_input(batch)
def deprocess_image(x):
    """Invert VGG preprocessing: add back the mean pixel, flip BGR->RGB, clip to uint8."""
    img = x.reshape((224, 224, 3))
    # Undo the zero-centering done with the ImageNet per-channel (BGR) means.
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    img = img[:, :, ::-1]  # 'BGR' -> 'RGB'
    return np.clip(img, 0, 255).astype('uint8')
def decode_label(pred):
    """Return the human-readable class name of the top-1 ImageNet prediction."""
    top1 = decode_predictions(pred)[0][0]
    return top1[1]
def normalize(x):
    """Normalize a tensor by its RMS (L2) norm, with a small epsilon for stability."""
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
    """Zero the gradients everywhere except a rectangle of `rect_shape` at `start_point`."""
    masked = np.zeros_like(gradients)
    row0, col0 = start_point
    height, width = rect_shape
    window = (slice(None), slice(row0, row0 + height), slice(col0, col0 + width))
    masked[window] = gradients[window]
    return masked
def constraint_light(gradients):
    """Replace the gradient with a constant field equal to 1e4 * mean(gradients)."""
    mean_scaled = 1e4 * np.mean(gradients)
    return mean_scaled * np.ones_like(gradients)
def constraint_black(gradients, rect_shape=(10, 10)):
    """Pick a random rectangle; return a mask of -1s there if its mean gradient
    is negative, otherwise an all-zero mask of the same shape."""
    max_row = gradients.shape[1] - rect_shape[0]
    max_col = gradients.shape[2] - rect_shape[1]
    start_point = (random.randint(0, max_row), random.randint(0, max_col))
    mask = np.zeros_like(gradients)
    window = (slice(None),
              slice(start_point[0], start_point[0] + rect_shape[0]),
              slice(start_point[1], start_point[1] + rect_shape[1]))
    region = gradients[window]
    if np.mean(region) < 0:
        mask[window] = -np.ones_like(region)
    return mask
def init_coverage_tables(model1, model2, model3):
    """Build a boolean covered/not-covered table for each of three models.

    NOTE(review): this definition is immediately shadowed by the
    single-model `init_coverage_tables` defined right below it, so as the
    module stands this three-model variant is dead code.
    """
    model_layer_dict1 = defaultdict(bool)
    model_layer_dict2 = defaultdict(bool)
    model_layer_dict3 = defaultdict(bool)
    init_dict(model1, model_layer_dict1)
    init_dict(model2, model_layer_dict2)
    init_dict(model3, model_layer_dict3)
    return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_coverage_tables(model1, model2=None, model3=None):
    """Build per-neuron boolean coverage table(s).

    With a single model, returns one ``defaultdict(bool)`` keyed by
    ``(layer_name, neuron_index)`` — unchanged behaviour for existing
    callers.  With three models, returns a tuple of three such dicts,
    restoring the three-model variant that this single-model definition
    previously shadowed.
    """
    model_layer_dict1 = defaultdict(bool)
    init_dict(model1, model_layer_dict1)
    if model2 is None and model3 is None:
        return model_layer_dict1
    model_layer_dict2 = defaultdict(bool)
    model_layer_dict3 = defaultdict(bool)
    init_dict(model2, model_layer_dict2)
    init_dict(model3, model_layer_dict3)
    return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_dict(model, model_layer_dict):
    """Register every neuron of every trackable layer as uncovered (False).

    Flatten and input layers are skipped — they carry no tracked neurons.
    """
    tracked = (l for l in model.layers
               if 'flatten' not in l.name and 'input' not in l.name)
    for layer in tracked:
        for neuron in range(layer.output_shape[-1]):
            model_layer_dict[(layer.name, neuron)] = False
def init_coverage_times(model):
    """Return a defaultdict(int) of per-neuron coverage counts, all zeroed."""
    times = defaultdict(int)
    init_times(model, times)
    return times
def init_coverage_value(model):
    """Return a defaultdict(float) of per-neuron activation values, all zeroed."""
    values = defaultdict(float)
    init_times(model, values)
    return values
def init_times(model, model_layer_times):
    """Zero the per-neuron entry for every trackable layer of `model`.

    Flatten and input layers are skipped — they carry no tracked neurons.
    """
    tracked = (l for l in model.layers
               if 'flatten' not in l.name and 'input' not in l.name)
    for layer in tracked:
        for neuron in range(layer.output_shape[-1]):
            model_layer_times[(layer.name, neuron)] = 0
def neuron_to_cover(model_layer_dict):
    """Pick a random uncovered (layer_name, index); fall back to any key.

    NOTE(review): this definition is immediately shadowed by the two-argument
    `neuron_to_cover` defined right below it, so it is dead code as the
    module stands.
    """
    not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
    if not_covered:
        layer_name, index = random.choice(not_covered)
    else:
        layer_name, index = random.choice(model_layer_dict.keys())
    return layer_name, index
def neuron_to_cover(not_covered, model_layer_dict):
    """Pick the next (layer_name, neuron_index) target to cover.

    Prefers — and removes — a random entry from `not_covered`; once that
    pool is exhausted, falls back to a random key of the full table.
    """
    if not_covered:
        layer_name, index = random.choice(not_covered)
        not_covered.remove((layer_name, index))
    else:
        # FIX: wrap in list() — random.choice on a dict view raises TypeError
        # on Python 3 (dict.keys() is not a sequence); identical on Python 2.
        layer_name, index = random.choice(list(model_layer_dict.keys()))
    return layer_name, index
def random_strategy(model, model_layer_times, neuron_to_cover_num):
    """Build Keras loss terms for `neuron_to_cover_num` randomly chosen
    not-yet-covered neurons (falling back to arbitrary neurons once the
    uncovered pool is exhausted)."""
    uncovered = [key for key, times in model_layer_times.items() if times == 0]
    loss_terms = []
    for _ in xrange(neuron_to_cover_num):
        layer_name, index = neuron_to_cover(uncovered, model_layer_times)
        loss_terms.append(K.mean(model.get_layer(layer_name).output[..., index]))
    return loss_terms
def neuron_select_high_weight(model, layer_names, top_k):
    """Populate the module-level `model_layer_weights_top_k` with the
    (layer_name, index) pairs whose mean filter weight is largest.

    Only neurons with positive mean weight are considered; at most `top_k`
    entries are appended (top_k may be a float here — the `k >= top_k`
    comparison still works).
    """
    global model_layer_weights_top_k
    model_layer_weights_dict = {}
    for layer_name in layer_names:
        weights = model.get_layer(layer_name).get_weights()
        if len(weights) <= 0:
            continue
        w = np.asarray(weights[0]) # 0 is weights, 1 is biases
        w = w.reshape(w.shape)
        for index in range(model.get_layer(layer_name).output_shape[-1]):
            # Mean weight feeding this output neuron / feature map.
            index_w = np.mean(w[..., index])
            if index_w <= 0:
                continue
            model_layer_weights_dict[(layer_name,index)]=index_w
    # Sort by mean weight, largest first, then keep the top_k entries.
    model_layer_weights_list = sorted(model_layer_weights_dict.items(), key=lambda x: x[1], reverse=True)
    k = 0
    for (layer_name, index),weight in model_layer_weights_list:
        if k >= top_k:
            break
        model_layer_weights_top_k.append([layer_name,index])
        k += 1
def neuron_selection(model, model_layer_times, model_layer_value, neuron_select_strategy, neuron_to_cover_num, threshold):
    """Select neurons to maximise and return their Keras loss terms.

    `neuron_select_strategy` is a string of digit flags, split evenly:
      '0' - neurons covered often, '1' - neurons covered rarely,
      '2' - neurons with the largest filter weights,
      '3' - neurons whose activation lies near `threshold`.
    'None' (the literal string) falls back to purely random selection.
    """
    if neuron_select_strategy == 'None':
        return random_strategy(model, model_layer_times, neuron_to_cover_num)

    num_strategy = len([x for x in neuron_select_strategy if x in ['0', '1', '2', '3']])
    # FIX: floor division — np.random.choice needs an integer sample size
    # (plain '/' yields a float on Python 3; same result on Python 2).
    neuron_to_cover_num_each = neuron_to_cover_num // num_strategy

    loss_neuron = []

    # Shared bookkeeping for strategies '0' and '1': flatten the coverage
    # counts into an array and remember each position's (layer, index) key.
    if ('0' in list(neuron_select_strategy)) or ('1' in list(neuron_select_strategy)):
        neurons_covered_times = []
        neurons_key_pos = {}
        for i, ((layer_name, index), time) in enumerate(model_layer_times.items()):
            neurons_covered_times.append(time)
            neurons_key_pos[i] = (layer_name, index)
        neurons_covered_times = np.asarray(neurons_covered_times)
        times_total = sum(neurons_covered_times)

    # Strategy '0': sample neurons proportionally to how often they are covered.
    if '0' in list(neuron_select_strategy):
        if times_total == 0:
            # Nothing covered yet: fall back to one random neuron.
            return random_strategy(model, model_layer_times, 1)
        neurons_covered_percentage = neurons_covered_times / float(times_total)
        num_neuron0 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False, p=neurons_covered_percentage)
        for num in num_neuron0:
            layer_name0, index0 = neurons_key_pos[num]
            loss_neuron.append(K.mean(model.get_layer(layer_name0).output[..., index0]))

    # Strategy '1': sample neurons inversely proportionally to their coverage.
    if '1' in list(neuron_select_strategy):
        if times_total == 0:
            return random_strategy(model, model_layer_times, 1)
        neurons_covered_times_inverse = np.subtract(max(neurons_covered_times), neurons_covered_times)
        neurons_covered_percentage_inverse = neurons_covered_times_inverse / float(sum(neurons_covered_times_inverse))
        num_neuron1 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False,
                                       p=neurons_covered_percentage_inverse)
        for num in num_neuron1:
            layer_name1, index1 = neurons_key_pos[num]
            loss_neuron.append(K.mean(model.get_layer(layer_name1).output[..., index1]))

    # Strategy '2': sample among the neurons with the largest filter weights
    # (feature maps with largest mean filter weight), cached in a module global.
    if '2' in list(neuron_select_strategy):
        layer_names = [layer.name for layer in model.layers if
                       'flatten' not in layer.name and 'input' not in layer.name]
        k = 0.1
        top_k = k * len(model_layer_times)  # number of neurons eligible for selection
        global model_layer_weights_top_k
        if len(model_layer_weights_top_k) == 0:
            neuron_select_high_weight(model, layer_names, top_k)  # populate the cache once
        num_neuron2 = np.random.choice(range(len(model_layer_weights_top_k)), neuron_to_cover_num_each, replace=False)
        for i in num_neuron2:
            layer_name2 = model_layer_weights_top_k[i][0]
            index2 = model_layer_weights_top_k[i][1]
            loss_neuron.append(K.mean(model.get_layer(layer_name2).output[..., index2]))

    # Strategy '3': push neurons just above `threshold` down and neurons just
    # below it up (classifier head layers are excluded).
    if '3' in list(neuron_select_strategy):
        above_threshold = []
        below_threshold = []
        above_num = neuron_to_cover_num_each // 2
        below_num = neuron_to_cover_num_each - above_num
        above_i = 0
        below_i = 0
        for (layer_name, index), value in model_layer_value.items():
            if threshold + 0.25 > value > threshold and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and above_i < above_num:
                above_threshold.append([layer_name, index])
                above_i += 1
            elif threshold > value > threshold - 0.2 and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and below_i < below_num:
                below_threshold.append([layer_name, index])
                below_i += 1
        # NOTE(review): this reset discards losses accumulated by strategies
        # '0'/'1'/'2' when combined with '3'; preserved as-is since callers
        # may rely on it — confirm intent before changing.
        loss_neuron = []
        for above_item in above_threshold:
            loss_neuron.append(K.mean(model.get_layer(above_item[0]).output[..., above_item[1]]))
        for below_item in below_threshold:
            loss_neuron.append(-K.mean(model.get_layer(below_item[0]).output[..., below_item[1]]))

    # FIX: the original checked `loss_neuron == 0`, which is always False for
    # a list; fall back to a random neuron when nothing was selected.
    if not loss_neuron:
        return random_strategy(model, model_layer_times, 1)  # nothing selected yet

    return loss_neuron
def neuron_scale(loss_neuron):
    """Rescale each loss term by the sum of all terms (relative weighting)."""
    total = K.sum(loss_neuron)
    return [term / total for term in loss_neuron]
def neuron_scale_maxmin(loss_neuron):
    """Min-max normalize the loss terms into [0, 1]."""
    hi = K.max(loss_neuron)
    lo = K.min(loss_neuron)
    span = hi - lo
    return [(term - lo) / span for term in loss_neuron]
def neuron_covered(model_layer_times):
    """Return (covered_count, total_count, coverage_ratio) over all neurons."""
    total_neurons = len(model_layer_times)
    covered_neurons = sum(1 for times in model_layer_times.values() if times > 0)
    return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Min-max scale an activation map into the range [rmin, rmax]."""
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    unit = (intermediate_layer_output - lo) / (hi - lo)
    return unit * (rmax - rmin) + rmin
def update_coverage(input_data, model, model_layer_times, threshold=0):
    """Run `input_data` through every trackable layer and bump the coverage
    count of each neuron whose scaled mean activation exceeds `threshold`.

    Returns the raw per-layer activations for reuse by the caller.
    Note: `xrange` means this module targets Python 2.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]
    # One auxiliary model exposing every tracked layer's output at once.
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        # [0]: single-sample batch; scale() min-max normalizes into [0, 1].
        scaled = scale(intermediate_layer_output[0])
        # xrange(scaled.shape[-1])
        for num_neuron in xrange(scaled.shape[-1]):
            if np.mean(scaled[..., num_neuron]) > threshold: #and model_layer_dict[(layer_names[i], num_neuron)] == 0:
                model_layer_times[(layer_names[i], num_neuron)] += 1
    return intermediate_layer_outputs
def update_coverage_value(input_data, model, model_layer_value):
    """Run `input_data` through every trackable layer and store each neuron's
    scaled mean activation in `model_layer_value`.

    Returns the raw per-layer activations for reuse by the caller.
    Note: `xrange` means this module targets Python 2.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]
    # One auxiliary model exposing every tracked layer's output at once.
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        # [0]: single-sample batch; scale() min-max normalizes into [0, 1].
        scaled = scale(intermediate_layer_output[0])
        # xrange(scaled.shape[-1])
        for num_neuron in xrange(scaled.shape[-1]):
            model_layer_value[(layer_names[i], num_neuron)] = np.mean(scaled[..., num_neuron])
    return intermediate_layer_outputs
'''
def update_coverage(input_data, model, model_layer_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
# xrange(scaled.shape[-1])
for num_neuron in xrange(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
return intermediate_layer_outputs
'''
def full_coverage(model_layer_dict):
    """Return True iff every neuron in the coverage table is covered."""
    return False not in model_layer_dict.values()
def fired(model, layer_name, index, input_data, threshold=0):
    """Return True if neuron `index` of `layer_name` activates above
    `threshold` (after min-max scaling) for `input_data`."""
    intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    # [0]: single-sample batch.
    intermediate_layer_output = intermediate_layer_model.predict(input_data)[0]
    scaled = scale(intermediate_layer_output)
    if np.mean(scaled[..., index]) > threshold:
        return True
    return False
def diverged(predictions1, predictions2, predictions3, target):
    """Return True when the three predictions disagree.

    `target` is unused; it is kept only for call-site compatibility.
    """
    all_agree = predictions1 == predictions2 == predictions3
    return not all_agree
def get_signature():
    """Return a unique-ish id string: milliseconds elapsed since 2015-06-06."""
    epoch = datetime(2015, 6, 6, 0, 0, 0, 0)
    elapsed = datetime.now() - epoch
    return str(int(elapsed.total_seconds() * 1000))
| 15,272 | 40.167116 | 144 | py |
DLFuzz | DLFuzz-master/ImageNet/gen_diff.py | # -*- coding: utf-8 -*-
"""DLFuzz driver for ImageNet models (VGG16/VGG19/ResNet50).

Reads seed images, then iteratively perturbs each one by gradient ascent on a
joint objective (mis-classification loss + neuron-coverage loss) to generate
adversarial inputs, saving any image whose predicted label flips.

CLI: gen_diff.py <strategies> <threshold> <neurons_per_step> <out_subdir>
               <iterations> <model_name>
NOTE(review): written for Python 2 (`xrange`, `time.clock`) and the legacy
`scipy.misc.imsave` API.
"""
from __future__ import print_function
import shutil
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.layers import Input
from scipy.misc import imsave
from utils_tmp import *
import sys
import os
import time
# input image dimensions
img_rows, img_cols = 224, 224
input_shape = (img_rows, img_cols, 3)
# define input tensor as a placeholder shared by model and gradient graph
input_tensor = Input(shape=input_shape)
# load multiple models sharing same input tensor
K.set_learning_phase(0)
model_name = sys.argv[6]
if model_name == 'vgg16':
    model1 = VGG16(input_tensor=input_tensor)
elif model_name == 'vgg19':
    model1 = VGG19(input_tensor=input_tensor)
elif model_name == 'resnet50':
    model1 = ResNet50(input_tensor=input_tensor)
else:
    print('please specify model name')
    os._exit(0)
print(model1.name)
# model_layer_dict1 = init_coverage_tables(model1)
model_layer_times1 = init_coverage_times(model1)  # times of each neuron covered (working copy)
model_layer_times2 = init_coverage_times(model1)  # updated only for seed + successful adversarial images
model_layer_value1 = init_coverage_value(model1)
# start gen inputs
img_dir = './seeds_20'
img_paths = os.listdir(img_dir)
img_num = len(img_paths)
# e.g.[0,1,2] None for neurons not covered, 0 for covered often, 1 for covered rarely, 2 for high weights
neuron_select_strategy = sys.argv[1]
threshold = float(sys.argv[2])
neuron_to_cover_num = int(sys.argv[3])
subdir = sys.argv[4]
iteration_times = int(sys.argv[5])
predict_weight = 0.5
neuron_to_cover_weight = 0.5
learning_step = 0.5
save_dir = './generated_inputs/' + subdir + '/'
# wipe any previous run's outputs from the target directory
if os.path.exists(save_dir):
    for i in os.listdir(save_dir):
        path_file = os.path.join(save_dir, i)
        if os.path.isfile(path_file):
            os.remove(path_file)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
# start = time.clock()
total_time = 0
total_norm = 0
adversial_num = 0
total_perturb_adversial = 0
for i in xrange(img_num):
    start_time = time.clock()
    img_list = []
    img_path = os.path.join(img_dir,img_paths[i])
    print(img_path)
    tmp_img = preprocess_image(img_path)
    orig_img = tmp_img.copy()
    img_list.append(tmp_img)
    update_coverage(tmp_img, model1, model_layer_times2, threshold)
    # seed queue: promising perturbed images are re-enqueued for further fuzzing
    while len(img_list) > 0:
        gen_img = img_list[0]
        img_list.remove(gen_img)
        # first check if input already induces differences
        pred1 = model1.predict(gen_img)
        label1 = np.argmax(pred1[0])
        label_top5 = np.argsort(pred1[0])[-5:]
        update_coverage_value(gen_img, model1, model_layer_value1)
        update_coverage(gen_img, model1, model_layer_times1, threshold)
        orig_label = label1
        orig_pred = pred1
        # ResNet50's final dense layer is named 'fc1000'; VGGs use 'predictions'
        if model1.name == 'resnet50':
            loss_1 = K.mean(model1.get_layer('fc1000').output[..., orig_label])
            loss_2 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-2]])
            loss_3 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-3]])
            loss_4 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-4]])
            loss_5 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-5]])
        else:
            loss_1 = K.mean(model1.get_layer('predictions').output[..., orig_label])
            loss_2 = K.mean(model1.get_layer('predictions').output[..., label_top5[-2]])
            loss_3 = K.mean(model1.get_layer('predictions').output[..., label_top5[-3]])
            loss_4 = K.mean(model1.get_layer('predictions').output[..., label_top5[-4]])
            loss_5 = K.mean(model1.get_layer('predictions').output[..., label_top5[-5]])
        # push the runner-up classes up and the current top-1 class down
        layer_output = (predict_weight * (loss_2 + loss_3 + loss_4 + loss_5) - loss_1)
        # neuron coverage loss
        loss_neuron = neuron_selection(model1, model_layer_times1, model_layer_value1, neuron_select_strategy,
                                       neuron_to_cover_num,threshold)
        # extreme value means the activation value for a neuron can be as high as possible ...
        EXTREME_VALUE = False
        if EXTREME_VALUE:
            neuron_to_cover_weight = 2
        layer_output += neuron_to_cover_weight * K.sum(loss_neuron)
        # for adversarial image generation
        final_loss = K.mean(layer_output)
        # we compute the gradient of the input picture wrt this loss
        grads = normalize(K.gradients(final_loss, input_tensor)[0])
        grads_tensor_list = [loss_1, loss_2, loss_3, loss_4, loss_5]
        grads_tensor_list.extend(loss_neuron)
        grads_tensor_list.append(grads)
        # this function returns the loss and grads given the input picture
        iterate = K.function([input_tensor], grads_tensor_list)
        # we run gradient ascent for some steps
        for iters in xrange(iteration_times):
            loss_neuron_list = iterate([gen_img])
            perturb = loss_neuron_list[-1] * learning_step
            gen_img += perturb
            # previous accumulated neuron coverage
            previous_coverage = neuron_covered(model_layer_times1)[2]
            pred1 = model1.predict(gen_img)
            label1 = np.argmax(pred1[0])
            update_coverage(gen_img, model1, model_layer_times1, threshold) # for seed selection
            current_coverage = neuron_covered(model_layer_times1)[2]
            diff_img = gen_img - orig_img
            L2_norm = np.linalg.norm(diff_img)
            orig_L2_norm = np.linalg.norm(orig_img)
            perturb_adversial = L2_norm / orig_L2_norm
            # re-enqueue when coverage grew noticeably and perturbation stayed small
            if current_coverage - previous_coverage > 0.01 / (i + 1) and perturb_adversial < 0.02:
                img_list.append(gen_img)
                # print('coverage diff = ', current_coverage - previous_coverage, 'perturb_adversial = ', perturb_adversial)
            # prediction flipped: save as adversarial example
            if label1 != orig_label:
                update_coverage(gen_img, model1, model_layer_times2, threshold)
                total_norm += L2_norm
                total_perturb_adversial += perturb_adversial
                # print('L2 norm : ' + str(L2_norm))
                # print('ratio perturb = ', perturb_adversial)
                gen_img_tmp = gen_img.copy()
                gen_img_deprocessed = deprocess_image(gen_img_tmp)
                save_img = save_dir + decode_label(pred1) + '-' + decode_label(orig_pred) + '-' + str(get_signature()) + '.png'
                imsave(save_img, gen_img_deprocessed)
                adversial_num += 1
    end_time = time.clock()
    print('covered neurons percentage %d neurons %.3f'
          % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
    duration = end_time - start_time
    print('used time : ' + str(duration))
    total_time += duration
print('covered neurons percentage %d neurons %.3f'
      % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
print('total_time = ' + str(total_time))
print('average_norm = ' + str(total_norm / adversial_num))
print('adversial num = ' + str(adversial_num))
print('average perb adversial = ' + str(total_perturb_adversial / adversial_num))
| 7,199 | 30.858407 | 127 | py |
DLFuzz | DLFuzz-master/MNIST/Model2.py | '''
LeNet-4
'''
# usage: python MNISTModel2.py - train the model
from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Convolution2D, MaxPooling2D, Input, Dense, Activation, Flatten
from keras.models import Model
from keras.utils import to_categorical
def Model2(input_tensor=None, train=False):
    """Build (and optionally train) a LeNet-4-style MNIST classifier.

    When `train` is True: loads MNIST via keras, trains for 10 epochs, saves
    weights to ./Model2.h5 and prints the test score. When False: builds the
    graph on the caller-supplied `input_tensor` and loads ./Model2.h5.
    Returns the compiled/loaded keras Model.
    """
    nb_classes = 10
    # convolution kernel size
    kernel_size = (5, 5)
    if train:
        batch_size = 256
        nb_epoch = 10
        # input image dimensions
        img_rows, img_cols = 28, 28
        # the data, shuffled and split between train and test sets
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
        # scale pixel values into [0, 1]
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        # convert class vectors to binary class matrices
        y_train = to_categorical(y_train, nb_classes)
        y_test = to_categorical(y_test, nb_classes)
        input_tensor = Input(shape=input_shape)
    elif input_tensor is None:
        print('you have to proved input_tensor when testing')
        exit()
    # block1
    print("in Model2 input_tensor = ",input_tensor)
    x = Convolution2D(6, kernel_size, activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)
    # block2
    x = Convolution2D(16, kernel_size, activation='relu', padding='same', name='block2_conv1')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool1')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(84, activation='relu', name='fc1')(x)
    # logits layer is named 'before_softmax' so the fuzzer can read raw scores
    x = Dense(nb_classes, name='before_softmax')(x)
    x = Activation('softmax', name='predictions')(x)
    model = Model(input_tensor, x)
    if train:
        # compiling
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
        # training
        model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=nb_epoch, verbose=1)
        # save model
        model.save_weights('./Model2.h5')
        score = model.evaluate(x_test, y_test, verbose=0)
        print('\n')
        print('Overall Test score:', score[0])
        print('Overall Test accuracy:', score[1])
    else:
        model.load_weights('./Model2.h5')
        print('Model2 loaded')
    return model
if __name__ == '__main__':
    Model2(train=True)
| 2,636 | 30.023529 | 120 | py |
DLFuzz | DLFuzz-master/MNIST/Model3.py | '''
LeNet-5
'''
# usage: python MNISTModel3.py - train the model
from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Convolution2D, MaxPooling2D, Input, Dense, Activation, Flatten
from keras.models import Model
from keras.utils import to_categorical
def Model3(input_tensor=None, train=False):
    """Build (and optionally train) a LeNet-5-style MNIST classifier.

    When `train` is True: loads MNIST via keras, trains for 10 epochs, saves
    weights to ./Model3.h5 and prints the test score. When False: builds the
    graph on the caller-supplied `input_tensor` and loads ./Model3.h5.
    Returns the compiled/loaded keras Model.
    """
    nb_classes = 10
    # convolution kernel size
    kernel_size = (5, 5)
    if train:
        batch_size = 256
        nb_epoch = 10
        # input image dimensions
        img_rows, img_cols = 28, 28
        # the data, shuffled and split between train and test sets
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
        # scale pixel values into [0, 1]
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        # convert class vectors to binary class matrices
        y_train = to_categorical(y_train, nb_classes)
        y_test = to_categorical(y_test, nb_classes)
        input_tensor = Input(shape=input_shape)
    elif input_tensor is None:
        print('you have to proved input_tensor when testing')
        exit()
    # block1
    x = Convolution2D(6, kernel_size, activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)
    # block2
    x = Convolution2D(16, kernel_size, activation='relu', padding='same', name='block2_conv1')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool1')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(120, activation='relu', name='fc1')(x)
    x = Dense(84, activation='relu', name='fc2')(x)
    # logits layer is named 'before_softmax' so the fuzzer can read raw scores
    x = Dense(nb_classes, name='before_softmax')(x)
    x = Activation('softmax', name='predictions')(x)
    model = Model(input_tensor, x)
    if train:
        # compiling
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
        # training
        model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=nb_epoch, verbose=1)
        # save model
        model.save_weights('./Model3.h5')
        score = model.evaluate(x_test, y_test, verbose=0)
        print('\n')
        print('Overall Test score:', score[0])
        print('Overall Test accuracy:', score[1])
    else:
        model.load_weights('./Model3.h5')
        print('Model3 loaded')
    return model
if __name__ == '__main__':
    Model3(train=True)
| 2,637 | 30.035294 | 120 | py |
DLFuzz | DLFuzz-master/MNIST/utils_tmp.py | # -*- coding: utf-8 -*-
import random
from collections import defaultdict
import numpy as np
from datetime import datetime
from keras import backend as K
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.models import Model
from keras.preprocessing import image
model_layer_weights_top_k = []
def preprocess_image(img_path):
    """Load a grayscale MNIST image as a (1, 28, 28, 1) float32 batch in [0, 1]."""
    pil_img = image.load_img(img_path, target_size=(28, 28), grayscale=True)
    arr = image.img_to_array(pil_img)
    arr = arr.reshape(1, 28, 28, 1).astype('float32')
    arr /= 255
    # input_img_data = preprocess_input(input_img_data) # final input shape = (1,224,224,3)
    return arr
def deprocess_image(x):
    """Map a (1, rows, cols, 1) float image in [0, 1] to a uint8 (rows, cols) array.

    Note: scales `x` in place (x *= 255) before clipping, matching callers that
    pass a throwaway copy.
    """
    x *= 255
    clipped = np.clip(x, 0, 255).astype('uint8')
    return clipped.reshape(clipped.shape[1], clipped.shape[2])
def decode_label(pred):
    """Return the human-readable class name of the top-1 ImageNet prediction."""
    top1 = decode_predictions(pred)[0][0]
    return top1[1]
def normalize(x):
    """Normalize tensor `x` by its RMS (L2-style) magnitude, with an epsilon
    to avoid division by zero."""
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
    """Keep gradients only inside the rectangle at `start_point` of size
    `rect_shape`; zero everywhere else (occlusion-style perturbation)."""
    masked = np.zeros_like(gradients)
    r0, c0 = start_point
    r1 = r0 + rect_shape[0]
    c1 = c0 + rect_shape[1]
    masked[:, r0:r1, c0:c1] = gradients[:, r0:r1, c0:c1]
    return masked
def constraint_light(gradients):
    """Replace the gradient with a uniform field: 1e4 x its mean, everywhere
    (light-style perturbation that brightens/darkens the whole image)."""
    mean_scaled = 1e4 * np.mean(gradients)
    return mean_scaled * np.ones_like(gradients)
def constraint_black(gradients, rect_shape=(10, 10)):
    """Pick a random rectangle of size `rect_shape`; if the mean gradient there
    is negative, return -1s inside it (a black patch), otherwise all zeros."""
    row = random.randint(0, gradients.shape[1] - rect_shape[0])
    col = random.randint(0, gradients.shape[2] - rect_shape[1])
    result = np.zeros_like(gradients)
    patch = gradients[:, row:row + rect_shape[0], col:col + rect_shape[1]]
    if np.mean(patch) < 0:
        result[:, row:row + rect_shape[0], col:col + rect_shape[1]] = -np.ones_like(patch)
    return result
def init_coverage_tables(model1, model2, model3):
    """Build one boolean coverage table per model.

    NOTE(review): this definition is dead code — it is immediately shadowed by
    the single-model `init_coverage_tables` defined right below it (Python
    binds the name to the last `def`), so callers only ever see that version.
    """
    model_layer_dict1 = defaultdict(bool)
    model_layer_dict2 = defaultdict(bool)
    model_layer_dict3 = defaultdict(bool)
    init_dict(model1, model_layer_dict1)
    init_dict(model2, model_layer_dict2)
    init_dict(model3, model_layer_dict3)
    return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_coverage_tables(model1):
    """Build a fresh {(layer_name, neuron_index): False} coverage table."""
    table = defaultdict(bool)
    init_dict(model1, table)
    return table
def init_dict(model, model_layer_dict):
    """Mark every neuron of every non-flatten, non-input layer as uncovered."""
    for layer in model.layers:
        name = layer.name
        if 'flatten' in name or 'input' in name:
            continue
        for neuron_idx in range(layer.output_shape[-1]):
            model_layer_dict[(name, neuron_idx)] = False
def init_coverage_times(model):
    """Build a fresh {(layer_name, neuron_index): 0} hit-count table."""
    hit_counts = defaultdict(int)
    init_times(model, hit_counts)
    return hit_counts
def init_coverage_value(model):
    """Build a fresh {(layer_name, neuron_index): 0.0} activation-value table."""
    activation_values = defaultdict(float)
    init_times(model, activation_values)
    return activation_values
def init_times(model, model_layer_times):
    """Zero the counter for every neuron of every non-flatten, non-input layer."""
    for layer in model.layers:
        name = layer.name
        if 'flatten' in name or 'input' in name:
            continue
        for neuron_idx in range(layer.output_shape[-1]):
            model_layer_times[(name, neuron_idx)] = 0
def neuron_to_cover(model_layer_dict):
    """Pick an uncovered (layer_name, index) neuron at random.

    NOTE(review): dead code — shadowed by the two-argument `neuron_to_cover`
    defined right below it. Also, `random.choice(model_layer_dict.keys())`
    only works on Python 2, where keys() returns a list; on Python 3 it is a
    non-indexable view.
    """
    not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
    if not_covered:
        layer_name, index = random.choice(not_covered)
    else:
        layer_name, index = random.choice(model_layer_dict.keys())
    return layer_name, index
def neuron_to_cover(not_covered, model_layer_dict):
    """Pick a (layer_name, index) neuron to target.

    Prefers a random entry from `not_covered` (removing it so the same neuron
    is not picked twice in one round); when every neuron is already covered,
    falls back to a random key of `model_layer_dict`.
    """
    if not_covered:
        layer_name, index = random.choice(not_covered)
        not_covered.remove((layer_name, index))
    else:
        # list() is required on Python 3, where dict.keys() is a view that
        # random.choice cannot index; it is harmless on Python 2.
        layer_name, index = random.choice(list(model_layer_dict.keys()))
    return layer_name, index
def random_strategy(model, model_layer_times, neuron_to_cover_num):
    """Pick `neuron_to_cover_num` neurons at random (preferring uncovered ones)
    and return a list of K.mean tensors over their activations — the
    neuron-coverage term of the fuzzing loss.
    """
    loss_neuron = []
    # neurons with a zero hit count are still uncovered
    not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_times.items() if v == 0]
    # range (not xrange) keeps this module importable on both Python 2 and 3
    for _ in range(neuron_to_cover_num):
        layer_name, index = neuron_to_cover(not_covered, model_layer_times)
        loss00_neuron = K.mean(model.get_layer(layer_name).output[..., index])
        loss_neuron.append(loss00_neuron)
    return loss_neuron
def neuron_select_high_weight(model, layer_names, top_k):
    """Populate the module-global `model_layer_weights_top_k` with up to
    `top_k` [layer_name, index] pairs, ranked by the mean of each neuron's
    (positive) incoming weights. Layers without weights are skipped.

    NOTE(review): `top_k` arrives as 0.1 * table size, which may be a float;
    it is only used as a loop bound (`k >= top_k`), so that is harmless.
    """
    global model_layer_weights_top_k
    model_layer_weights_dict = {}
    for layer_name in layer_names:
        weights = model.get_layer(layer_name).get_weights()
        if len(weights) <= 0:
            continue
        w = np.asarray(weights[0])  # 0 is weights, 1 is biases
        w = w.reshape(w.shape)  # NOTE(review): no-op reshape; kept for behavior parity
        for index in range(model.get_layer(layer_name).output_shape[-1]):
            index_w = np.mean(w[..., index])
            if index_w <= 0:
                # non-positive mean weight: neuron never ranked
                continue
            model_layer_weights_dict[(layer_name,index)]=index_w
    # rank neurons by descending mean weight, then keep the first top_k
    model_layer_weights_list = sorted(model_layer_weights_dict.items(), key=lambda x: x[1], reverse=True)
    k = 0
    for (layer_name, index),weight in model_layer_weights_list:
        if k >= top_k:
            break
        model_layer_weights_top_k.append([layer_name,index])
        k += 1
def neuron_selection(model, model_layer_times, model_layer_value, neuron_select_strategy, neuron_to_cover_num, threshold):
    """Select neurons to target and return their K.mean loss tensors.

    `neuron_select_strategy` is a string of digit flags that are combined:
      '0' — sample neurons proportionally to how OFTEN they were covered,
      '1' — sample neurons proportionally to how RARELY they were covered,
      '2' — sample among the top-weighted neurons (see neuron_select_high_weight),
      '3' — pick neurons whose last activation sits just above/below `threshold`
            (note: strategy '3' discards losses gathered by earlier strategies).
    'None' (the literal string) falls back to random_strategy.

    NOTE(review): `neuron_to_cover_num / num_strategy` assumes Python 2 integer
    division; under Python 3 it yields a float that np.random.choice rejects as
    a size — use // when porting.
    """
    if neuron_select_strategy == 'None':
        return random_strategy(model, model_layer_times, neuron_to_cover_num)
    num_strategy = len([x for x in neuron_select_strategy if x in ['0', '1', '2', '3']])
    neuron_to_cover_num_each = neuron_to_cover_num / num_strategy
    loss_neuron = []
    # initialization for strategies '0' and '1': flatten the hit-count table
    # into a parallel array + position->key map for weighted sampling
    if ('0' in list(neuron_select_strategy)) or ('1' in list(neuron_select_strategy)):
        i = 0
        neurons_covered_times = []
        neurons_key_pos = {}
        for (layer_name, index), time in model_layer_times.items():
            neurons_covered_times.append(time)
            neurons_key_pos[i] = (layer_name, index)
            i += 1
        neurons_covered_times = np.asarray(neurons_covered_times)
        times_total = sum(neurons_covered_times)
    # select neurons covered often
    if '0' in list(neuron_select_strategy):
        if times_total == 0:
            # no coverage yet: weighted sampling impossible, fall back to random
            return random_strategy(model, model_layer_times, 1)
        neurons_covered_percentage = neurons_covered_times / float(times_total)
        num_neuron0 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False, p=neurons_covered_percentage)
        for num in num_neuron0:
            layer_name0, index0 = neurons_key_pos[num]
            loss0_neuron = K.mean(model.get_layer(layer_name0).output[..., index0])
            loss_neuron.append(loss0_neuron)
    # select neurons covered rarely
    if '1' in list(neuron_select_strategy):
        if times_total == 0:
            return random_strategy(model, model_layer_times, 1)
        # invert the counts so rarely-hit neurons get the highest probability
        neurons_covered_times_inverse = np.subtract(max(neurons_covered_times), neurons_covered_times)
        neurons_covered_percentage_inverse = neurons_covered_times_inverse / float(sum(neurons_covered_times_inverse))
        num_neuron1 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False,
                                       p=neurons_covered_percentage_inverse)
        for num in num_neuron1:
            layer_name1, index1 = neurons_key_pos[num]
            loss1_neuron = K.mean(model.get_layer(layer_name1).output[..., index1])
            loss_neuron.append(loss1_neuron)
    # select neurons with largest weights (feature maps with largest filter weights)
    if '2' in list(neuron_select_strategy):
        layer_names = [layer.name for layer in model.layers if
                       'flatten' not in layer.name and 'input' not in layer.name]
        k = 0.1
        top_k = k * len(model_layer_times)  # number of neurons to be selected within
        global model_layer_weights_top_k
        if len(model_layer_weights_top_k) == 0:
            # populate the module-global ranking once, on first use
            neuron_select_high_weight(model, layer_names, top_k)
        num_neuron2 = np.random.choice(range(len(model_layer_weights_top_k)), neuron_to_cover_num_each, replace=False)
        for i in num_neuron2:
            layer_name2 = model_layer_weights_top_k[i][0]
            index2 = model_layer_weights_top_k[i][1]
            loss2_neuron = K.mean(model.get_layer(layer_name2).output[..., index2])
            loss_neuron.append(loss2_neuron)
    if '3' in list(neuron_select_strategy):
        above_threshold = []
        below_threshold = []
        above_num = neuron_to_cover_num_each / 2
        below_num = neuron_to_cover_num_each - above_num
        above_i = 0
        below_i = 0
        # scan last-seen activation values; skip the dense/output layers by name
        for (layer_name, index), value in model_layer_value.items():
            if threshold + 0.25 > value > threshold and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and layer_name != 'before_softmax' \
                    and above_i < above_num:
                above_threshold.append([layer_name, index])
                above_i += 1
            elif threshold > value > threshold - 0.2 and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and layer_name != 'before_softmax' \
                    and below_i < below_num:
                below_threshold.append([layer_name, index])
                below_i += 1
        # NOTE(review): this reset discards any losses collected by strategies
        # '0'/'1'/'2' when combined with '3' — possibly unintended.
        loss_neuron = []
        if len(above_threshold) > 0:
            # push just-above-threshold neurons further up (positive loss)
            for above_item in range(len(above_threshold)):
                loss_neuron.append(K.mean(
                    model.get_layer(above_threshold[above_item][0]).output[..., above_threshold[above_item][1]]))
        if len(below_threshold) > 0:
            # pull just-below-threshold neurons down (negative loss)
            for below_item in range(len(below_threshold)):
                loss_neuron.append(-K.mean(
                    model.get_layer(below_threshold[below_item][0]).output[..., below_threshold[below_item][1]]))
    # NOTE(review): loss_neuron is always a list here, so `loss_neuron == 0` is
    # never True; an empty-list check (`if not loss_neuron`) was probably meant.
    if loss_neuron == 0:
        return random_strategy(model, model_layer_times, 1)  # The beginning of no neurons covered
    return loss_neuron
def neuron_scale(loss_neuron):
    """Rescale each neuron-loss tensor by the sum over all of them."""
    total = K.sum(loss_neuron)
    return [each / total for each in loss_neuron]
def neuron_scale_maxmin(loss_neuron):
    """Min-max normalize the neuron-loss tensors into the [0, 1] range."""
    hi = K.max(loss_neuron)
    lo = K.min(loss_neuron)
    span = hi - lo
    return [(each - lo) / span for each in loss_neuron]
def neuron_covered(model_layer_times):
    """Return (covered_count, total_count, coverage_ratio) for a hit-count table."""
    total = len(model_layer_times)
    covered = sum(1 for hits in model_layer_times.values() if hits > 0)
    return covered, total, covered / float(total)
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Linearly rescale an activation map into [rmin, rmax]."""
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    unit = (intermediate_layer_output - lo) / (hi - lo)
    return unit * (rmax - rmin) + rmin
def update_coverage(input_data, model, model_layer_times, threshold=0):
    """Run `input_data` through every non-flatten, non-input layer of `model`
    and increment `model_layer_times[(layer, neuron)]` for each neuron whose
    min-max-scaled mean activation exceeds `threshold`.

    Returns the raw per-layer activation list so callers can reuse it.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]

    # one probe model that emits all selected layer outputs in a single pass
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)

    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        scaled = scale(intermediate_layer_output[0])
        # range (not xrange) keeps this module importable on both Python 2 and 3
        for num_neuron in range(scaled.shape[-1]):
            if np.mean(scaled[..., num_neuron]) > threshold:
                model_layer_times[(layer_names[i], num_neuron)] += 1
    return intermediate_layer_outputs
def update_coverage_value(input_data, model, model_layer_value):
    """Run `input_data` through every non-flatten, non-input layer of `model`
    and store each neuron's min-max-scaled mean activation into
    `model_layer_value[(layer, neuron)]`.

    Returns the raw per-layer activation list so callers can reuse it.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]

    # one probe model that emits all selected layer outputs in a single pass
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)

    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        scaled = scale(intermediate_layer_output[0])
        # range (not xrange) keeps this module importable on both Python 2 and 3
        for num_neuron in range(scaled.shape[-1]):
            model_layer_value[(layer_names[i], num_neuron)] = np.mean(scaled[..., num_neuron])
    return intermediate_layer_outputs
'''
def update_coverage(input_data, model, model_layer_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
# xrange(scaled.shape[-1])
for num_neuron in xrange(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
return intermediate_layer_outputs
'''
def full_coverage(model_layer_dict):
    """Return True when no neuron in the coverage table is still uncovered."""
    return False not in model_layer_dict.values()
def fired(model, layer_name, index, input_data, threshold=0):
    """Return True if neuron `index` of layer `layer_name` activates above
    `threshold` (after min-max scaling) when `model` sees `input_data`."""
    probe = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    activations = probe.predict(input_data)[0]
    scaled_activations = scale(activations)
    if np.mean(scaled_activations[..., index]) > threshold:
        return True
    return False
def diverged(predictions1, predictions2, predictions3, target):
    """Return True when the three model predictions disagree.

    `target` is accepted for API compatibility but not consulted.
    """
    all_agree = predictions1 == predictions2 == predictions3
    return not all_agree
def get_signature():
    """Return a unique-ish id string: milliseconds elapsed since 2015-06-06."""
    epoch = datetime(2015, 6, 6, 0, 0, 0, 0)
    elapsed = datetime.now() - epoch
    return str(int(elapsed.total_seconds() * 1000))
| 16,769 | 41.671756 | 144 | py |
DLFuzz | DLFuzz-master/MNIST/gen_diff.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from keras.layers import Input
from scipy.misc import imsave
from utils_tmp import *
import sys
import os
import time
from Model1 import Model1
from Model2 import Model2
from Model3 import Model3
def load_data(path="../MNIST_data/mnist.npz"):
    """Load the MNIST arrays from a local .npz archive.

    Returns ((x_train, y_train), (x_test, y_test)) as numpy arrays.
    """
    with np.load(path) as archive:
        train = (archive['x_train'], archive['y_train'])
        test = (archive['x_test'], archive['y_test'])
    return train, test
# --- DLFuzz driver for the MNIST LeNet models (Model1/Model2/Model3) ---
# CLI: gen_diff.py <strategies> <threshold> <neurons_per_step> <out_subdir>
#                <iterations> <model_name>
# NOTE(review): written for Python 2 (`xrange`, `time.clock`) and the legacy
# `scipy.misc.imsave` API.
# input image dimensions
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
# define input tensor as a placeholder
input_tensor = Input(shape=input_shape)
# load multiple models sharing same input tensor
K.set_learning_phase(0)
model_name = sys.argv[6]
if model_name == 'model1':
    model1 = Model1(input_tensor=input_tensor)
elif model_name == 'model2':
    model1 = Model2(input_tensor=input_tensor)
elif model_name == 'model3':
    model1 = Model3(input_tensor=input_tensor)
else:
    print('please specify model name')
    os._exit(0)
print(model1.name)
# model_layer_dict1 = init_coverage_tables(model1)
model_layer_times1 = init_coverage_times(model1)  # times of each neuron covered (working copy)
model_layer_times2 = init_coverage_times(model1)  # updated only for seed + successful adversarial images
model_layer_value1 = init_coverage_value(model1)
# start gen inputs
# img_paths = image.list_pictures('../seeds_20', ext='JPEG')
img_dir = './seeds_50'
img_paths = os.listdir(img_dir)
img_num = len(img_paths)
# e.g.[0,1,2] None for neurons not covered, 0 for covered often, 1 for covered rarely, 2 for high weights
neuron_select_strategy = sys.argv[1]
threshold = float(sys.argv[2])
neuron_to_cover_num = int(sys.argv[3])
subdir = sys.argv[4]
iteration_times = int(sys.argv[5])
neuron_to_cover_weight = 0.5
predict_weight = 0.5
learning_step = 0.02
save_dir = './generated_inputs/' + subdir + '/'
# wipe any previous run's outputs from the target directory
if os.path.exists(save_dir):
    for i in os.listdir(save_dir):
        path_file = os.path.join(save_dir, i)
        if os.path.isfile(path_file):
            os.remove(path_file)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
# start = time.clock()
total_time = 0
total_norm = 0
adversial_num = 0
total_perturb_adversial = 0
for i in xrange(img_num):
    start_time = time.clock()
    img_list = []
    img_path = os.path.join(img_dir,img_paths[i])
    # seed files are named like "<idx>_<label>.png"; the label is parsed but
    # not consulted below (the model's own prediction is used as ground truth)
    img_name = img_paths[i].split('.')[0]
    mannual_label = int(img_name.split('_')[1])
    print(img_path)
    tmp_img = preprocess_image(img_path)
    orig_img = tmp_img.copy()
    img_list.append(tmp_img)
    update_coverage(tmp_img, model1, model_layer_times2, threshold)
    # seed queue: promising perturbed images are re-enqueued for further fuzzing
    while len(img_list) > 0:
        gen_img = img_list[0]
        img_list.remove(gen_img)
        # first check if input already induces differences
        pred1 = model1.predict(gen_img)
        label1 = np.argmax(pred1[0])
        label_top5 = np.argsort(pred1[0])[-5:]
        update_coverage_value(gen_img, model1, model_layer_value1)
        update_coverage(gen_img, model1, model_layer_times1, threshold)
        orig_label = label1
        orig_pred = pred1
        # use the pre-softmax logits: push runner-up classes up, top-1 down
        loss_1 = K.mean(model1.get_layer('before_softmax').output[..., orig_label])
        loss_2 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-2]])
        loss_3 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-3]])
        loss_4 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-4]])
        loss_5 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-5]])
        layer_output = (predict_weight * (loss_2 + loss_3 + loss_4 + loss_5) - loss_1)
        # neuron coverage loss
        loss_neuron = neuron_selection(model1, model_layer_times1, model_layer_value1, neuron_select_strategy,
                                       neuron_to_cover_num, threshold)
        # loss_neuron = neuron_scale(loss_neuron) # useless, and negative result
        # extreme value means the activation value for a neuron can be as high as possible ...
        EXTREME_VALUE = False
        if EXTREME_VALUE:
            neuron_to_cover_weight = 2
        layer_output += neuron_to_cover_weight * K.sum(loss_neuron)
        # for adversarial image generation
        final_loss = K.mean(layer_output)
        # we compute the gradient of the input picture wrt this loss
        grads = normalize(K.gradients(final_loss, input_tensor)[0])
        grads_tensor_list = [loss_1, loss_2, loss_3, loss_4, loss_5]
        grads_tensor_list.extend(loss_neuron)
        grads_tensor_list.append(grads)
        # this function returns the loss and grads given the input picture
        iterate = K.function([input_tensor], grads_tensor_list)
        # we run gradient ascent for 3 steps
        for iters in xrange(iteration_times):
            loss_neuron_list = iterate([gen_img])
            perturb = loss_neuron_list[-1] * learning_step
            gen_img += perturb
            # previous accumulated neuron coverage
            previous_coverage = neuron_covered(model_layer_times1)[2]
            pred1 = model1.predict(gen_img)
            label1 = np.argmax(pred1[0])
            update_coverage(gen_img, model1, model_layer_times1, threshold) # for seed selection
            current_coverage = neuron_covered(model_layer_times1)[2]
            diff_img = gen_img - orig_img
            L2_norm = np.linalg.norm(diff_img)
            orig_L2_norm = np.linalg.norm(orig_img)
            perturb_adversial = L2_norm / orig_L2_norm
            # re-enqueue when coverage grew noticeably and perturbation stayed small
            if current_coverage - previous_coverage > 0.01 / (i + 1) and perturb_adversial < 0.02:
                img_list.append(gen_img)
                # print('coverage diff = ', current_coverage - previous_coverage, 'perturb_adversial = ', perturb_adversial)
            # prediction flipped: save as adversarial example
            if label1 != orig_label:
                update_coverage(gen_img, model1, model_layer_times2, threshold)
                total_norm += L2_norm
                total_perturb_adversial += perturb_adversial
                # print('L2 norm : ' + str(L2_norm))
                # print('ratio perturb = ', perturb_adversial)
                gen_img_tmp = gen_img.copy()
                gen_img_deprocessed = deprocess_image(gen_img_tmp)
                save_img = save_dir + img_name + '_' + str(get_signature()) + '.png'
                imsave(save_img, gen_img_deprocessed)
                adversial_num += 1
    end_time = time.clock()
    print('covered neurons percentage %d neurons %.3f'
          % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
    duration = end_time - start_time
    print('used time : ' + str(duration))
    total_time += duration
print('covered neurons percentage %d neurons %.3f'
      % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
print('total_time = ' + str(total_time))
print('average_norm = ' + str(total_norm / adversial_num))
print('adversial num = ' + str(adversial_num))
print('average perb adversial = ' + str(total_perturb_adversial / adversial_num))
| 7,071 | 29.614719 | 124 | py |
DLFuzz | DLFuzz-master/MNIST/Model1.py | '''
LeNet-1
'''
# usage: python MNISTModel1.py - train the model
from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Convolution2D, MaxPooling2D, Input, Dense, Activation, Flatten
from keras.models import Model
from keras.utils import to_categorical
from keras import backend as K
import numpy as np
def load_data(path="MNIST_data/mnist.npz"):
    """Load the MNIST arrays from a local .npz archive.

    Returns ((x_train, y_train), (x_test, y_test)) as numpy arrays.
    """
    with np.load(path) as archive:
        train = (archive['x_train'], archive['y_train'])
        test = (archive['x_test'], archive['y_test'])
    return train, test
def Model1(input_tensor=None, train=False):
    """Build the LeNet-1 convolutional network for MNIST.

    Parameters
    ----------
    input_tensor : keras tensor, optional
        Input tensor to build the graph on. Required when ``train=False``;
        ignored (replaced by a fresh 28x28x1 Input) when ``train=True``.
    train : bool
        If True, train on MNIST, save weights to ``./Model1.h5`` and print
        the test score; otherwise load the previously saved weights.

    Returns
    -------
    keras.models.Model
        The constructed (and trained or weight-loaded) model.
    """
    nb_classes = 10
    # convolution kernel size
    kernel_size = (5, 5)

    if train:
        batch_size = 256
        nb_epoch = 10

        # input image dimensions
        img_rows, img_cols = 28, 28

        # the data, shuffled and split between train and test sets
        (x_train, y_train), (x_test, y_test) = load_data()
        print(x_train.shape)
        # reshape to NHWC with a single grayscale channel
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

        # scale pixel values to [0, 1]
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255

        # convert class vectors to binary class matrices
        y_train = to_categorical(y_train, nb_classes)
        y_test = to_categorical(y_test, nb_classes)

        input_tensor = Input(shape=input_shape)
    elif input_tensor is None:
        # Fixed typo in the original message ("proved" -> "provide").
        print('you have to provide input_tensor when testing')
        exit()

    # block1: 4 feature maps, 'same' padding keeps the 28x28 spatial size
    x = Convolution2D(4, kernel_size, activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)

    # block2: 12 feature maps on the pooled 14x14 activations
    x = Convolution2D(12, kernel_size, activation='relu', padding='same', name='block2_conv1')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool1')(x)

    x = Flatten(name='flatten')(x)
    x = Dense(nb_classes, name='before_softmax')(x)
    x = Activation('softmax', name='predictions')(x)

    model = Model(input_tensor, x)

    if train:
        # compiling
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

        # training
        model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=nb_epoch, verbose=1)

        # save model weights
        model.save_weights('./Model1.h5')

        score = model.evaluate(x_test, y_test, verbose=0)
        print('\n')
        print('Overall Test score:', score[0])
        print('Overall Test accuracy:', score[1])
    else:
        model.load_weights('./Model1.h5')
        print('Model1 loaded')

    return model
if __name__ == '__main__':
    # Script entry point: train LeNet-1 from scratch and save the weights
    # to ./Model1.h5 (see Model1 above).
    Model1(train=True)
| 3,009 | 29.714286 | 120 | py |
pylops | pylops-master/setup.py | import os
from setuptools import find_packages, setup
def src(pth):
    """Return *pth* resolved relative to this setup file's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, pth)
# Project description
# Short summary used as the PyPI one-liner; built from two adjacent strings.
descr = (
    "Python library implementing linear operators to allow solving large-scale optimization "
    "problems without requiring to explicitly create a dense (or sparse) matrix."
)

# Setup
setup(
    name="pylops",
    description=descr,
    # README.md doubles as the long description rendered on PyPI.
    long_description=open(src("README.md")).read(),
    long_description_content_type="text/markdown",
    keywords=["algebra", "inverse problems", "large-scale optimization"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Scientific/Engineering :: Mathematics",
    ],
    author="mrava",
    author_email="matteoravasi@gmail.com",
    # Hard runtime requirements; optional accelerators live under extras_require.
    install_requires=["numpy >= 1.21.0", "scipy >= 1.4.0"],
    extras_require={
        "advanced": [
            "llvmlite",
            "numba",
            "pyfftw",
            "PyWavelets",
            "scikit-fmm",
            "spgl1",
        ]
    },
    packages=find_packages(exclude=["pytests"]),
    # Version is derived from git tags by setuptools_scm and written to
    # pylops/version.py at build time.
    use_scm_version=dict(
        root=".", relative_to=__file__, write_to=src("pylops/version.py")
    ),
    setup_requires=["pytest-runner", "setuptools_scm"],
    test_suite="pytests",
    tests_require=["pytest"],
    zip_safe=True,
)
| 1,647 | 28.963636 | 93 | py |
pylops | pylops-master/tutorials/torchop.py | r"""
19. Automatic Differentiation
=============================
This tutorial focuses on the use of :class:`pylops.TorchOperator` to allow performing
Automatic Differentiation (AD) on chains of operators which can be:
- native PyTorch mathematical operations (e.g., :func:`torch.log`,
:func:`torch.sin`, :func:`torch.tan`, :func:`torch.pow`, ...)
- neural network operators in :mod:`torch.nn`
- PyLops linear operators
This opens up many opportunities, such as easily including linear regularization
terms to nonlinear cost functions or using linear preconditioners with nonlinear
modelling operators.
"""
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import gradcheck
import pylops
plt.close("all")
np.random.seed(10)
torch.manual_seed(10)
###############################################################################
# In this example we consider a simple multidimensional functional:
#
# .. math::
# \mathbf{y} = \mathbf{A} sin(\mathbf{x})
#
# and we use AD to compute the gradient with respect to the input vector
# evaluated at :math:`\mathbf{x}=\mathbf{x}_0` :
# :math:`\mathbf{g} = d\mathbf{y} / d\mathbf{x} |_{\mathbf{x}=\mathbf{x}_0}`.
#
# Let's start by defining the Jacobian:
#
# .. math::
# \textbf{J} = \begin{bmatrix}
# dy_1 / dx_1 & ... & dy_1 / dx_M \\
# ... & ... & ... \\
# dy_N / dx_1 & ... & dy_N / dx_M
# \end{bmatrix} = \begin{bmatrix}
# a_{11} cos(x_1) & ... & a_{1M} cos(x_M) \\
# ... & ... & ... \\
# a_{N1} cos(x_1) & ... & a_{NM} cos(x_M)
# \end{bmatrix} = \textbf{A} cos(\mathbf{x})
#
# Since both input and output are multidimensional,
# PyTorch ``backward`` actually computes the product between the transposed
# Jacobian and a vector :math:`\mathbf{v}`:
# :math:`\mathbf{g}=\mathbf{J^T} \mathbf{v}`.
#
# To validate the correctness of the AD result, we can in this simple case
# also compute the Jacobian analytically and apply it to the same vector
# :math:`\mathbf{v}` that we have provided to PyTorch ``backward``.
nx, ny = 10, 6
x0 = torch.arange(nx, dtype=torch.double, requires_grad=True)
# Forward
A = np.random.normal(0.0, 1.0, (ny, nx))
At = torch.from_numpy(A)
Aop = pylops.TorchOperator(pylops.MatrixMult(A))
y = Aop.apply(torch.sin(x0))
# AD
v = torch.ones(ny, dtype=torch.double)
y.backward(v, retain_graph=True)
adgrad = x0.grad
# Analytical
J = At * torch.cos(x0)
anagrad = torch.matmul(J.T, v)
print("Input: ", x0)
print("AD gradient: ", adgrad)
print("Analytical gradient: ", anagrad)
###############################################################################
# Similarly we can use the :func:`torch.autograd.gradcheck` directly from
# PyTorch. Note that doubles must be used for this to succeed with very small
# `eps` and `atol`
input = (
torch.arange(nx, dtype=torch.double, requires_grad=True),
Aop.matvec,
Aop.rmatvec,
Aop.device,
"cpu",
)
test = gradcheck(Aop.Top, input, eps=1e-6, atol=1e-4)
print(test)
###############################################################################
# Note that while matrix-vector multiplication could have been performed using
# the native PyTorch operator :func:`torch.matmul`, in this case we have shown
# that we are also able to use a PyLops operator wrapped in
# :class:`pylops.TorchOperator`. As already mentioned, this gives us the
# ability to use much more complex linear operators provided by PyLops within
# a chain of mixed linear and nonlinear AD-enabled operators.
# To conclude, let's see how we can chain a torch convolutional network
# with PyLops :class:`pylops.Smoothing2D` operator. First of all, we consider
# a single training sample.
class Network(nn.Module):
    """Small two-layer convolutional network used in the AD tutorial.

    Halves the channel count at the first convolution and halves it again
    at the second, keeping the spatial size unchanged (3x3 kernels with
    padding 1). Both convolutions are followed by a LeakyReLU(0.2).
    """

    def __init__(self, input_channels):
        super().__init__()
        half = input_channels // 2
        quarter = input_channels // 4
        # Attribute names and construction order are kept identical to the
        # original so that the state dict and seeded weight init match.
        self.conv1 = nn.Conv2d(input_channels, half, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(half, quarter, kernel_size=3, padding=1)
        self.activation = nn.LeakyReLU(0.2)
        # NOTE(review): defined but never applied in forward(); preserved
        # for behavioral parity with the original definition.
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        out = self.activation(self.conv1(x))
        out = self.activation(self.conv2(out))
        return out
net = Network(4)
Cop = pylops.TorchOperator(pylops.Smoothing2D((5, 5), dims=(32, 32)))
# Forward
x = torch.randn(1, 4, 32, 32).requires_grad_()
y = Cop.apply(net(x).view(-1)).reshape(32, 32)
# Backward
loss = y.sum()
loss.backward()
fig, axs = plt.subplots(1, 2, figsize=(12, 3))
axs[0].imshow(y.detach().numpy())
axs[0].set_title("Forward")
axs[0].axis("tight")
axs[1].imshow(x.grad.reshape(4 * 32, 32).T)
axs[1].set_title("Gradient")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# And finally we do the same with a batch of 3 training samples.
net = Network(4)
Cop = pylops.TorchOperator(pylops.Smoothing2D((5, 5), dims=(32, 32)), batch=True)
# Forward
x = torch.randn(3, 4, 32, 32).requires_grad_()
y = Cop.apply(net(x).reshape(3, 32 * 32)).reshape(3, 32, 32)
# Backward
loss = y.sum()
loss.backward()
fig, axs = plt.subplots(1, 2, figsize=(12, 3))
axs[0].imshow(y[0].detach().numpy())
axs[0].set_title("Forward")
axs[0].axis("tight")
axs[1].imshow(x.grad[0].reshape(4 * 32, 32).T)
axs[1].set_title("Gradient")
axs[1].axis("tight")
plt.tight_layout()
| 5,465 | 30.964912 | 85 | py |
pylops | pylops-master/tutorials/lsm.py | r"""
15. Least-squares migration
===========================
Seismic migration is the process by which seismic data are manipulated to create
an image of the subsurface reflectivity.
While traditionally solved as the adjont of the demigration operator,
it is becoming more and more common to solve the underlying inverse problem
in the quest for more accurate and detailed subsurface images.
Indipendently of the choice of the modelling operator (i.e., ray-based or
full wavefield-based), the demigration/migration process can be expressed as
a linear operator of such a kind:
.. math::
d(\mathbf{x_r}, \mathbf{x_s}, t) =
w(t) * \int\limits_V G(\mathbf{x}, \mathbf{x_s}, t)
G(\mathbf{x_r}, \mathbf{x}, t) m(\mathbf{x})\,\mathrm{d}\mathbf{x}
where :math:`m(\mathbf{x})` is the reflectivity
at every location in the subsurface, :math:`G(\mathbf{x}, \mathbf{x_s}, t)`
and :math:`G(\mathbf{x_r}, \mathbf{x}, t)` are the Green's functions
from source-to-subsurface-to-receiver and finally :math:`w(t)` is the
wavelet. Ultimately, while the Green's functions can be computed in many different
ways, solving this system of equations for the reflectivity model is what
we generally refer to as Least-squares migration (LSM).
In this tutorial we will consider the most simple scenario where we use an
eikonal solver to compute the Green's functions and show how we can use the
:py:class:`pylops.waveeqprocessing.LSM` operator to perform LSM.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse.linalg import lsqr
import pylops
plt.close("all")
np.random.seed(0)
###############################################################################
# To start we create a simple model with 2 interfaces
# Velocity Model
nx, nz = 81, 60
dx, dz = 4, 4
x, z = np.arange(nx) * dx, np.arange(nz) * dz
v0 = 1000 # initial velocity
kv = 0.0 # gradient
vel = np.outer(np.ones(nx), v0 + kv * z)
# Reflectivity Model
refl = np.zeros((nx, nz))
refl[:, 30] = -1
refl[:, 50] = 0.5
# Receivers
nr = 11
rx = np.linspace(10 * dx, (nx - 10) * dx, nr)
rz = 20 * np.ones(nr)
recs = np.vstack((rx, rz))
dr = recs[0, 1] - recs[0, 0]
# Sources
ns = 10
sx = np.linspace(dx * 10, (nx - 10) * dx, ns)
sz = 10 * np.ones(ns)
sources = np.vstack((sx, sz))
ds = sources[0, 1] - sources[0, 0]
###############################################################################
plt.figure(figsize=(10, 5))
im = plt.imshow(vel.T, cmap="summer", extent=(x[0], x[-1], z[-1], z[0]))
plt.scatter(recs[0], recs[1], marker="v", s=150, c="b", edgecolors="k")
plt.scatter(sources[0], sources[1], marker="*", s=150, c="r", edgecolors="k")
cb = plt.colorbar(im)
cb.set_label("[m/s]")
plt.axis("tight")
plt.xlabel("x [m]"), plt.ylabel("z [m]")
plt.title("Velocity")
plt.xlim(x[0], x[-1])
plt.tight_layout()
plt.figure(figsize=(10, 5))
im = plt.imshow(refl.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]))
plt.scatter(recs[0], recs[1], marker="v", s=150, c="b", edgecolors="k")
plt.scatter(sources[0], sources[1], marker="*", s=150, c="r", edgecolors="k")
plt.colorbar(im)
plt.axis("tight")
plt.xlabel("x [m]"), plt.ylabel("z [m]")
plt.title("Reflectivity")
plt.xlim(x[0], x[-1])
plt.tight_layout()
###############################################################################
# We can now create our LSM object and invert for the reflectivity using two
# different solvers: :py:func:`scipy.sparse.linalg.lsqr` (LS solution) and
# :py:func:`pylops.optimization.sparsity.fista` (LS solution with sparse model).
nt = 651
dt = 0.004
t = np.arange(nt) * dt
wav, wavt, wavc = pylops.utils.wavelets.ricker(t[:41], f0=20)
lsm = pylops.waveeqprocessing.LSM(
z,
x,
t,
sources,
recs,
v0,
wav,
wavc,
mode="analytic",
engine="numba",
)
d = lsm.Demop * refl
madj = lsm.Demop.H * d
minv = lsm.solve(d.ravel(), solver=lsqr, **dict(iter_lim=100))
minv = minv.reshape(nx, nz)
minv_sparse = lsm.solve(
d.ravel(), solver=pylops.optimization.sparsity.fista, **dict(eps=1e2, niter=100)
)
minv_sparse = minv_sparse.reshape(nx, nz)
# demigration
d = d.reshape(ns, nr, nt)
dadj = lsm.Demop * madj # (ns * nr, nt)
dadj = dadj.reshape(ns, nr, nt)
dinv = lsm.Demop * minv
dinv = dinv.reshape(ns, nr, nt)
dinv_sparse = lsm.Demop * minv_sparse
dinv_sparse = dinv_sparse.reshape(ns, nr, nt)
# sphinx_gallery_thumbnail_number = 2
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
axs[0][0].imshow(refl.T, cmap="gray", vmin=-1, vmax=1)
axs[0][0].axis("tight")
axs[0][0].set_title(r"$m$")
axs[0][1].imshow(madj.T, cmap="gray", vmin=-madj.max(), vmax=madj.max())
axs[0][1].set_title(r"$m_{adj}$")
axs[0][1].axis("tight")
axs[1][0].imshow(minv.T, cmap="gray", vmin=-1, vmax=1)
axs[1][0].axis("tight")
axs[1][0].set_title(r"$m_{inv}$")
axs[1][1].imshow(minv_sparse.T, cmap="gray", vmin=-1, vmax=1)
axs[1][1].axis("tight")
axs[1][1].set_title(r"$m_{FISTA}$")
plt.tight_layout()
fig, axs = plt.subplots(1, 4, figsize=(10, 4))
axs[0].imshow(d[0, :, :300].T, cmap="gray", vmin=-d.max(), vmax=d.max())
axs[0].set_title(r"$d$")
axs[0].axis("tight")
axs[1].imshow(dadj[0, :, :300].T, cmap="gray", vmin=-dadj.max(), vmax=dadj.max())
axs[1].set_title(r"$d_{adj}$")
axs[1].axis("tight")
axs[2].imshow(dinv[0, :, :300].T, cmap="gray", vmin=-d.max(), vmax=d.max())
axs[2].set_title(r"$d_{inv}$")
axs[2].axis("tight")
axs[3].imshow(dinv_sparse[0, :, :300].T, cmap="gray", vmin=-d.max(), vmax=d.max())
axs[3].set_title(r"$d_{fista}$")
axs[3].axis("tight")
plt.tight_layout()
fig, axs = plt.subplots(1, 4, figsize=(10, 4))
axs[0].imshow(d[ns // 2, :, :300].T, cmap="gray", vmin=-d.max(), vmax=d.max())
axs[0].set_title(r"$d$")
axs[0].axis("tight")
axs[1].imshow(dadj[ns // 2, :, :300].T, cmap="gray", vmin=-dadj.max(), vmax=dadj.max())
axs[1].set_title(r"$d_{adj}$")
axs[1].axis("tight")
axs[2].imshow(dinv[ns // 2, :, :300].T, cmap="gray", vmin=-d.max(), vmax=d.max())
axs[2].set_title(r"$d_{inv}$")
axs[2].axis("tight")
axs[3].imshow(dinv_sparse[ns // 2, :, :300].T, cmap="gray", vmin=-d.max(), vmax=d.max())
axs[3].set_title(r"$d_{fista}$")
axs[3].axis("tight")
plt.tight_layout()
###############################################################################
# This was just a short teaser, for a more advanced set of examples of 2D and
# 3D traveltime-based LSM head over to this
# `notebook <https://github.com/mrava87/pylops_notebooks/blob/master/developement/LeastSquaresMigration.ipynb>`_.
| 6,408 | 32.380208 | 113 | py |
pylops | pylops-master/tutorials/deblending.py | r"""
18. Deblending
==============
The cocktail party problem arises when sounds from different sources mix before reaching our ears
(or any recording device), requiring the brain (or any hardware in the recording device) to estimate
individual sources from the received mixture. In seismic acquisition, an analog problem is present
when multiple sources are fired simultaneously. This family of acquisition methods is usually referred to as
simultaneous shooting and the problem of separating the blended shot gathers into their individual
components is called deblending. Whilst various firing strategies can be adopted, in this example
we consider the continuous blending problem where a single source is fired sequentially at an interval
shorter than the amount of time required for waves to travel into the Earth and come back.
Simply stated the forward problem can be written as:
.. math::
\mathbf{d}^b = \boldsymbol\Phi \mathbf{d}
Here :math:`\mathbf{d} = [\mathbf{d}_1^T, \mathbf{d}_2^T,\ldots,
\mathbf{d}_N^T]^T` is a stack of :math:`N` individual shot gathers,
:math:`\boldsymbol\Phi=[\boldsymbol\Phi_1, \boldsymbol\Phi_2,\ldots,
\boldsymbol\Phi_N]` is the blending operator, :math:`\mathbf{d}^b` is the
so-called supergather than contains all shots superimposed to each other.
In order to successfully invert this severely under-determined problem, two key
ingredients must be introduced:
- the firing time of each source (i.e., shifts of the blending operator) must be
chosen to be dithered around a nominal regular, periodic firing interval.
In our case, we consider shots of duration :math:`T=4\,\text{s}`, regular firing time of :math:`T_s=2\,\text{s}`
and a dithering code as follows :math:`\Delta t = U(-1,1)`;
- prior information about the data to reconstruct, either in the form of regularization
or preconditioning must be introduced. In our case we will use a patch-FK transform
as preconditioner and solve the problem imposing sparsity in the transformed domain.
In other words, we aim to solve the following problem:
.. math::
J = \|\mathbf{d}^b - \boldsymbol\Phi \mathbf{S}^H \mathbf{x}\|_2 + \epsilon \|\mathbf{x}\|_1
for which we will use the :py:class:`pylops.optimization.sparsity.fista` solver.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse.linalg import lobpcg as sp_lobpcg
import pylops
np.random.seed(10)
plt.close("all")
###############################################################################
# We can now load and display a small portion of the MobilAVO dataset composed
# of 60 shots and a single receiver. This data is unblended.
data = np.load("../testdata/deblending/mobil.npy")
ns, nt = data.shape
dt = 0.004
t = np.arange(nt) * dt
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
ax.imshow(
data.T,
cmap="gray",
vmin=-50,
vmax=50,
extent=(0, ns, t[-1], 0),
interpolation="none",
)
ax.set_title("CRG")
ax.set_xlabel("#Src")
ax.set_ylabel("t [s]")
ax.axis("tight")
plt.tight_layout()
###############################################################################
# We are now ready to define the blending operator, blend our data, and apply
# the adjoint of the blending operator to it. This is usually referred as
# pseudo-deblending: as we will see brings back each source to its own nominal
# firing time, but since sources partially overlap in time, it will also generate
# some burst like noise in the data. Deblending can hopefully fix this.
overlap = 0.5
ignition_times = 2.0 * np.random.rand(ns) - 1.0
ignition_times = np.arange(0, overlap * nt * ns, overlap * nt) * dt + ignition_times
ignition_times[0] = 0.0
Bop = pylops.waveeqprocessing.BlendingContinuous(
nt, 1, ns, dt, ignition_times, dtype="complex128"
)
data_blended = Bop * data[:, np.newaxis]
data_pseudo = Bop.H * data_blended
data_pseudo = data_pseudo.reshape(ns, nt)
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
ax.imshow(
data_pseudo.T.real,
cmap="gray",
vmin=-50,
vmax=50,
extent=(0, ns, t[-1], 0),
interpolation="none",
)
ax.set_title("Pseudo-deblended CRG")
ax.set_xlabel("#Src")
ax.set_ylabel("t [s]")
ax.axis("tight")
plt.tight_layout()
###############################################################################
# We are finally ready to solve our deblending inverse problem
# Patched FK
dimsd = data.shape
nwin = (20, 80)
nover = (10, 40)
nop = (128, 128)
nop1 = (128, 65)
nwins = (5, 24)
dims = (nwins[0] * nop1[0], nwins[1] * nop1[1])
Fop = pylops.signalprocessing.FFT2D(nwin, nffts=nop, real=True)
Sop = pylops.signalprocessing.Patch2D(
Fop.H, dims, dimsd, nwin, nover, nop1, tapertype="hanning"
)
# Overall operator
Op = Bop * Sop
# Compute max eigenvalue (we do this explicitly to be able to run this fast)
Op1 = Op.H * Op
maxeig = np.abs(Op1.eigs(1, niter=5, ncv=5, tol=5e-2))[0]
alpha = 1.0 / maxeig
# Deblend
niter = 60
decay = (np.exp(-0.05 * np.arange(niter)) + 0.2) / 1.2
p_inv = pylops.optimization.sparsity.fista(
Op,
data_blended.ravel(),
niter=niter,
eps=5e0,
alpha=alpha,
decay=decay,
show=True,
)[0]
data_inv = Sop * p_inv
data_inv = data_inv.reshape(ns, nt)
fig, axs = plt.subplots(1, 4, sharey=False, figsize=(12, 8))
axs[0].imshow(
data.T.real,
cmap="gray",
extent=(0, ns, t[-1], 0),
vmin=-50,
vmax=50,
interpolation="none",
)
axs[0].set_title("CRG")
axs[0].set_xlabel("#Src")
axs[0].set_ylabel("t [s]")
axs[0].axis("tight")
axs[1].imshow(
data_pseudo.T.real,
cmap="gray",
extent=(0, ns, t[-1], 0),
vmin=-50,
vmax=50,
interpolation="none",
)
axs[1].set_title("Pseudo-deblended CRG")
axs[1].set_xlabel("#Src")
axs[1].axis("tight")
axs[2].imshow(
data_inv.T.real,
cmap="gray",
extent=(0, ns, t[-1], 0),
vmin=-50,
vmax=50,
interpolation="none",
)
axs[2].set_xlabel("#Src")
axs[2].set_title("Deblended CRG")
axs[2].axis("tight")
axs[3].imshow(
data.T.real - data_inv.T.real,
cmap="gray",
extent=(0, ns, t[-1], 0),
vmin=-50,
vmax=50,
interpolation="none",
)
axs[3].set_xlabel("#Src")
axs[3].set_title("Blending error")
axs[3].axis("tight")
plt.tight_layout()
###############################################################################
# Finally, let's look a bit more at what really happened under the hood. We
# display a number of patches and their associated FK spectrum
Sop1 = pylops.signalprocessing.Patch2D(
Fop.H, dims, dimsd, nwin, nover, nop1, tapertype=None
)
# Original
p = Sop1.H * data.ravel()
preshape = p.reshape(nwins[0], nwins[1], nop1[0], nop1[1])
it = 16 # index of window along time axis for plotting
fig, axs = plt.subplots(2, 4, figsize=(12, 5))
fig.suptitle("Data patches")
for i, ix in enumerate(range(4)):
axs[0][i].imshow(np.fft.fftshift(np.abs(preshape[ix, it]).T, axes=1))
axs[0][i].axis("tight")
axs[1][i].imshow(
np.real((Fop.H * preshape[ix, it].ravel()).reshape(nwin)).T,
cmap="gray",
vmin=-30,
vmax=30,
interpolation="none",
)
axs[1][i].axis("tight")
plt.tight_layout()
# Pseudo-deblended
p_pseudo = Sop1.H * data_pseudo.ravel()
p_pseudoreshape = p_pseudo.reshape(nwins[0], nwins[1], nop1[0], nop1[1])
fig, axs = plt.subplots(2, 4, figsize=(12, 5))
fig.suptitle("Pseudo-deblended patches")
for i, ix in enumerate(range(4)):
axs[0][i].imshow(np.fft.fftshift(np.abs(p_pseudoreshape[ix, it]).T, axes=1))
axs[0][i].axis("tight")
axs[1][i].imshow(
np.real((Fop.H * p_pseudoreshape[ix, it].ravel()).reshape(nwin)).T,
cmap="gray",
vmin=-30,
vmax=30,
interpolation="none",
)
axs[1][i].axis("tight")
plt.tight_layout()
# Deblended
p_inv = Sop1.H * data_inv.ravel()
p_invreshape = p_inv.reshape(nwins[0], nwins[1], nop1[0], nop1[1])
fig, axs = plt.subplots(2, 4, figsize=(12, 5))
fig.suptitle("Deblended patches")
for i, ix in enumerate(range(4)):
axs[0][i].imshow(np.fft.fftshift(np.abs(p_invreshape[ix, it]).T, axes=1))
axs[0][i].axis("tight")
axs[1][i].imshow(
np.real((Fop.H * p_invreshape[ix, it].ravel()).reshape(nwin)).T,
cmap="gray",
vmin=-30,
vmax=30,
interpolation="none",
)
axs[1][i].axis("tight")
plt.tight_layout()
| 8,243 | 30.227273 | 114 | py |
pylops | pylops-master/tutorials/realcomplex.py | r"""
17. Real/Complex Inversion
==========================
In this tutorial we will discuss two equivalent approaches to the solution
of inverse problems with real-valued model vector and complex-valued data vector.
In other words, we consider a modelling operator
:math:`\mathbf{A}:\mathbb{F}^m \to \mathbb{C}^n` (which could be the case
for example for the real FFT).
Mathematically speaking, this problem can be solved equivalently by inverting
the complex-valued problem:
.. math::
\mathbf{y} = \mathbf{A} \mathbf{x}
or the real-valued augmented system
.. math::
\DeclareMathOperator{\Real}{Re}
\DeclareMathOperator{\Imag}{Im}
\begin{bmatrix}
\Real(\mathbf{y}) \\
\Imag(\mathbf{y})
\end{bmatrix} =
\begin{bmatrix}
\Real(\mathbf{A}) \\
\Imag(\mathbf{A})
\end{bmatrix} \mathbf{x}
Whilst we already know how to solve the first problem, let's see how we can
solve the second one by taking advantage of the ``real`` method of the
:class:`pylops.LinearOperator` object. We will also wrap our linear operator
into a :class:`pylops.MemoizeOperator` which remembers the last N model and
data vectors and by-passes the computation of the forward and/or adjoint pass
whenever the same pair reappears. This is very useful in our case when we
want to compute the real and the imag components of the same operator, as
the underlying forward pass is then evaluated only once and reused for both.
"""
import matplotlib.pyplot as plt
import numpy as np

import pylops

plt.close("all")
np.random.seed(0)  # fixed seed so A (and the printed solutions) is reproducible

###############################################################################
# To start we create the forward problem
n = 5
x = np.arange(n) + 1.0  # real-valued model vector [1, 2, ..., n]

# make A: complex-valued n x n matrix with independent real and imag parts
Ar = np.random.normal(0, 1, (n, n))
Ai = np.random.normal(0, 1, (n, n))
A = Ar + 1j * Ai
Aop = pylops.MatrixMult(A, dtype=np.complex128)
y = Aop @ x  # complex-valued data

###############################################################################
# Let's check we can solve this problem using the first formulation
# (complex-valued system; only the adjoint output is forced real)
A1op = Aop.toreal(forw=False, adj=True)
xinv = A1op.div(y)
print(f"xinv={xinv}\n")

###############################################################################
# Let's now see how we formulate the second problem
# (real-valued augmented system stacking Re(A) and Im(A))
Amop = pylops.MemoizeOperator(Aop, max_neval=10)  # caches last 10 evaluations
Arop = Amop.toreal()
Aiop = Amop.toimag()

A1op = pylops.VStack([Arop, Aiop])
y1 = np.concatenate([np.real(y), np.imag(y)])
xinv1 = np.real(A1op.div(y1))
print(f"xinv1={xinv1}\n")
pylops | pylops-master/tutorials/mdd.py | """
09. Multi-Dimensional Deconvolution
===================================
This example shows how to set-up and run the
:py:class:`pylops.waveeqprocessing.MDD` inversion using synthetic data.
"""
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pylops
from pylops.utils.seismicevents import hyperbolic2d, makeaxis
from pylops.utils.tapers import taper3d
from pylops.utils.wavelets import ricker
warnings.filterwarnings("ignore")
plt.close("all")
# sphinx_gallery_thumbnail_number = 5
###############################################################################
# Let's start by creating a set of hyperbolic events to be used as
# our MDC kernel
# Input parameters
par = {
"ox": -150,
"dx": 10,
"nx": 31,
"oy": -250,
"dy": 10,
"ny": 51,
"ot": 0,
"dt": 0.004,
"nt": 300,
"f0": 20,
"nfmax": 200,
}
t0_m = [0.2]
vrms_m = [700.0]
amp_m = [1.0]
t0_G = [0.2, 0.5, 0.7]
vrms_G = [800.0, 1200.0, 1500.0]
amp_G = [1.0, 0.6, 0.5]
# Taper
tap = taper3d(par["nt"], [par["ny"], par["nx"]], (5, 5), tapertype="hanning")
# Create axis
t, t2, x, y = makeaxis(par)
# Create wavelet
wav = ricker(t[:41], f0=par["f0"])[0]
# Generate model
m, mwav = hyperbolic2d(x, t, t0_m, vrms_m, amp_m, wav)
# Generate operator
G, Gwav = np.zeros((par["ny"], par["nx"], par["nt"])), np.zeros(
(par["ny"], par["nx"], par["nt"])
)
for iy, y0 in enumerate(y):
G[iy], Gwav[iy] = hyperbolic2d(x - y0, t, t0_G, vrms_G, amp_G, wav)
G, Gwav = G * tap, Gwav * tap
# Add negative part to data and model
m = np.concatenate((np.zeros((par["nx"], par["nt"] - 1)), m), axis=-1)
mwav = np.concatenate((np.zeros((par["nx"], par["nt"] - 1)), mwav), axis=-1)
Gwav2 = np.concatenate((np.zeros((par["ny"], par["nx"], par["nt"] - 1)), Gwav), axis=-1)
# Define MDC linear operator
Gwav_fft = np.fft.rfft(Gwav2, 2 * par["nt"] - 1, axis=-1)
Gwav_fft = (Gwav_fft[..., : par["nfmax"]]).transpose(2, 0, 1)
MDCop = pylops.waveeqprocessing.MDC(
Gwav_fft,
nt=2 * par["nt"] - 1,
nv=1,
dt=0.004,
dr=1.0,
)
# Create data
d = MDCop * m.T.ravel()
d = d.reshape(2 * par["nt"] - 1, par["ny"]).T
###############################################################################
# Let's display what we have so far: operator, input model, and data
fig, axs = plt.subplots(1, 2, figsize=(8, 6))
axs[0].imshow(
Gwav2[int(par["ny"] / 2)].T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(Gwav2.max()),
vmax=np.abs(Gwav2.max()),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[0].set_title("G - inline view", fontsize=15)
axs[0].set_xlabel(r"$x_R$")
axs[1].set_ylabel(r"$t$")
axs[1].imshow(
Gwav2[:, int(par["nx"] / 2)].T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(Gwav2.max()),
vmax=np.abs(Gwav2.max()),
extent=(y.min(), y.max(), t2.max(), t2.min()),
)
axs[1].set_title("G - inline view", fontsize=15)
axs[1].set_xlabel(r"$x_S$")
axs[1].set_ylabel(r"$t$")
fig.tight_layout()
fig, axs = plt.subplots(1, 2, figsize=(8, 6))
axs[0].imshow(
mwav.T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(mwav.max()),
vmax=np.abs(mwav.max()),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[0].set_title(r"$m$", fontsize=15)
axs[0].set_xlabel(r"$x_R$")
axs[1].set_ylabel(r"$t$")
axs[1].imshow(
d.T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(d.max()),
vmax=np.abs(d.max()),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[1].set_title(r"$d$", fontsize=15)
axs[1].set_xlabel(r"$x_S$")
axs[1].set_ylabel(r"$t$")
fig.tight_layout()
###############################################################################
# We are now ready to feed our operator to
# :py:class:`pylops.waveeqprocessing.MDD` and invert back for our input model
minv, madj, psfinv, psfadj = pylops.waveeqprocessing.MDD(
Gwav,
d[:, par["nt"] - 1 :],
dt=par["dt"],
dr=par["dx"],
nfmax=par["nfmax"],
wav=wav,
twosided=True,
add_negative=True,
adjoint=True,
psf=True,
dottest=False,
**dict(damp=1e-4, iter_lim=20, show=0)
)
fig = plt.figure(figsize=(8, 6))
ax1 = plt.subplot2grid((1, 5), (0, 0), colspan=2)
ax2 = plt.subplot2grid((1, 5), (0, 2), colspan=2)
ax3 = plt.subplot2grid((1, 5), (0, 4))
ax1.imshow(
madj.T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(madj.max()),
vmax=np.abs(madj.max()),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
ax1.set_title("Adjoint m", fontsize=15)
ax1.set_xlabel(r"$x_V$")
axs[1].set_ylabel(r"$t$")
ax2.imshow(
minv.T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(minv.max()),
vmax=np.abs(minv.max()),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
ax2.set_title("Inverted m", fontsize=15)
ax2.set_xlabel(r"$x_V$")
axs[1].set_ylabel(r"$t$")
ax3.plot(
madj[int(par["nx"] / 2)] / np.abs(madj[int(par["nx"] / 2)]).max(), t2, "r", lw=5
)
ax3.plot(
minv[int(par["nx"] / 2)] / np.abs(minv[int(par["nx"] / 2)]).max(), t2, "k", lw=3
)
ax3.set_ylim([t2[-1], t2[0]])
fig.tight_layout()
fig, axs = plt.subplots(1, 2, figsize=(8, 6))
axs[0].imshow(
psfinv[int(par["nx"] / 2)].T,
aspect="auto",
interpolation="nearest",
vmin=-np.abs(psfinv.max()),
vmax=np.abs(psfinv.max()),
cmap="gray",
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[0].set_title("Inverted psf - inline view", fontsize=15)
axs[0].set_xlabel(r"$x_V$")
axs[1].set_ylabel(r"$t$")
axs[1].imshow(
psfinv[:, int(par["nx"] / 2)].T,
aspect="auto",
interpolation="nearest",
vmin=-np.abs(psfinv.max()),
vmax=np.abs(psfinv.max()),
cmap="gray",
extent=(y.min(), y.max(), t2.max(), t2.min()),
)
axs[1].set_title("Inverted psf - xline view", fontsize=15)
axs[1].set_xlabel(r"$x_V$")
axs[1].set_ylabel(r"$t$")
fig.tight_layout()
###############################################################################
# We repeat the same procedure but this time we will add a preconditioning
# by means of ``causality_precond`` parameter, which enforces the inverted
# model to be zero in the negative part of the time axis (as expected by
# theory). This preconditioning will have the effect of speeding up the
# convergence of the iterative solver and thus reduce the computation time
# of the deconvolution
minvprec = pylops.waveeqprocessing.MDD(
Gwav,
d[:, par["nt"] - 1 :],
dt=par["dt"],
dr=par["dx"],
nfmax=par["nfmax"],
wav=wav,
twosided=True,
add_negative=True,
adjoint=False,
psf=False,
causality_precond=True,
dottest=False,
**dict(damp=1e-4, iter_lim=50, show=0)
)
fig = plt.figure(figsize=(8, 6))
ax1 = plt.subplot2grid((1, 5), (0, 0), colspan=2)
ax2 = plt.subplot2grid((1, 5), (0, 2), colspan=2)
ax3 = plt.subplot2grid((1, 5), (0, 4))
ax1.imshow(
madj.T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(madj.max()),
vmax=np.abs(madj.max()),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
ax1.set_title("Adjoint m", fontsize=15)
ax1.set_xlabel(r"$x_V$")
axs[1].set_ylabel(r"$t$")
ax2.imshow(
minvprec.T,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-np.abs(minvprec.max()),
vmax=np.abs(minvprec.max()),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
ax2.set_title("Inverted m", fontsize=15)
ax2.set_xlabel(r"$x_V$")
axs[1].set_ylabel(r"$t$")
ax3.plot(
madj[int(par["nx"] / 2)] / np.abs(madj[int(par["nx"] / 2)]).max(), t2, "r", lw=5
)
ax3.plot(
minvprec[int(par["nx"] / 2)] / np.abs(minv[int(par["nx"] / 2)]).max(), t2, "k", lw=3
)
ax3.set_ylim([t2[-1], t2[0]])
fig.tight_layout()
| 7,744 | 25.892361 | 88 | py |
pylops | pylops-master/tutorials/seismicinterpolation.py | r"""
12. Seismic regularization
==========================
The problem of *seismic data regularization* (or interpolation) is a very
simple one to write, yet ill-posed and very hard to solve.
The forward modelling operator is a simple :py:class:`pylops.Restriction`
operator which is applied along the spatial direction(s).
.. math::
\mathbf{y} = \mathbf{R} \mathbf{x}
Here :math:`\mathbf{y} = [\mathbf{y}_{R_1}^T, \mathbf{y}_{R_2}^T,\ldots,
\mathbf{y}_{R_N^T}]^T` where each vector :math:`\mathbf{y}_{R_i}`
contains all time samples recorded in the seismic data at the specific
receiver :math:`R_i`. Similarly, :math:`\mathbf{x} = [\mathbf{x}_{r_1}^T,
\mathbf{x}_{r_2}^T,\ldots, \mathbf{x}_{r_M}^T]`, contains all traces at the
regularly and finely sampled receiver locations :math:`r_i`.
By inverting such an equation we can create a regularized data with
densely and regularly spatial direction(s).
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import convolve
import pylops
from pylops.utils.seismicevents import linear2d, makeaxis
from pylops.utils.wavelets import ricker
np.random.seed(0)
plt.close("all")
###############################################################################
# Let's start by creating a very simple 2d data composed of 3 linear events
# input parameters
par = {"ox": 0, "dx": 2, "nx": 70, "ot": 0, "dt": 0.004, "nt": 80, "f0": 20}
v = 1500
t0_m = [0.1, 0.2, 0.28]
theta_m = [0, 30, -80]
phi_m = [0]
amp_m = [1.0, -2, 0.5]
# axis
taxis, t2, xaxis, y = makeaxis(par)
# wavelet
wav = ricker(taxis[:41], f0=par["f0"])[0]
# model
_, x = linear2d(xaxis, taxis, v, t0_m, theta_m, amp_m, wav)
###############################################################################
# We can now define the spatial locations along which the data has been
# sampled. In this specific example we will assume that we have access only to
# 60% of the 'original' locations.
perc_subsampling = 0.6
nxsub = int(np.round(par["nx"] * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(par["nx"]))[:nxsub])
# restriction operator
Rop = pylops.Restriction((par["nx"], par["nt"]), iava, axis=0, dtype="float64")
# data
y = Rop * x.ravel()
y = y.reshape(nxsub, par["nt"])
# mask
ymask = Rop.mask(x.ravel())
# inverse
xinv = Rop / y.ravel()
xinv = xinv.reshape(par["nx"], par["nt"])
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(5, 4))
axs[0].imshow(
x.T, cmap="gray", vmin=-2, vmax=2, extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0])
)
axs[0].set_title("Model")
axs[0].axis("tight")
axs[1].imshow(
ymask.T,
cmap="gray",
vmin=-2,
vmax=2,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[1].set_title("Masked model")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# As we can see, inverting the restriction operator is not possible without
# adding any prior information into the inverse problem. In the following we
# will consider two possible routes:
#
# * regularized inversion with second derivative along the spatial axis
#
# .. math::
# J = \|\mathbf{y} - \mathbf{R} \mathbf{x}\|_2 +
# \epsilon_\nabla ^2 \|\nabla \mathbf{x}\|_2
#
# * sparsity-promoting inversion with :py:class:`pylops.FFT2` operator used
#   as sparsifying transform
#
# .. math::
# J = \|\mathbf{y} - \mathbf{R} \mathbf{F}^H \mathbf{x}\|_2 +
# \epsilon \|\mathbf{F}^H \mathbf{x}\|_1
# smooth inversion
D2op = pylops.SecondDerivative((par["nx"], par["nt"]), axis=0, dtype="float64")
xsmooth, _, _ = pylops.waveeqprocessing.SeismicInterpolation(
y,
par["nx"],
iava,
kind="spatial",
**dict(epsRs=[np.sqrt(0.1)], damp=np.sqrt(1e-4), iter_lim=50, show=0)
)
# sparse inversion with FFT2
nfft = 2**8
FFTop = pylops.signalprocessing.FFT2D(
dims=[par["nx"], par["nt"]], nffts=[nfft, nfft], sampling=[par["dx"], par["dt"]]
)
X = FFTop * x.ravel()
X = np.reshape(X, (nfft, nfft))
xl1, Xl1, cost = pylops.waveeqprocessing.SeismicInterpolation(
y,
par["nx"],
iava,
kind="fk",
nffts=(nfft, nfft),
sampling=(par["dx"], par["dt"]),
**dict(niter=50, eps=1e-1)
)
fig, axs = plt.subplots(1, 4, sharey=True, figsize=(13, 4))
axs[0].imshow(
x.T, cmap="gray", vmin=-2, vmax=2, extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0])
)
axs[0].set_title("Model")
axs[0].axis("tight")
axs[1].imshow(
ymask.T,
cmap="gray",
vmin=-2,
vmax=2,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[1].set_title("Masked model")
axs[1].axis("tight")
axs[2].imshow(
xsmooth.T,
cmap="gray",
vmin=-2,
vmax=2,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[2].set_title("Smoothed model")
axs[2].axis("tight")
axs[3].imshow(
xl1.T,
cmap="gray",
vmin=-2,
vmax=2,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[3].set_title("L1 model")
axs[3].axis("tight")
fig, axs = plt.subplots(1, 3, figsize=(10, 2))
axs[0].imshow(
np.fft.fftshift(np.abs(X[:, : nfft // 2 - 1]), axes=0).T,
extent=(
np.fft.fftshift(FFTop.f1)[0],
np.fft.fftshift(FFTop.f1)[-1],
FFTop.f2[nfft // 2 - 1],
FFTop.f2[0],
),
)
axs[0].set_title("Model in f-k domain")
axs[0].axis("tight")
axs[0].set_xlim(-0.1, 0.1)
axs[0].set_ylim(50, 0)
axs[1].imshow(
np.fft.fftshift(np.abs(Xl1[:, : nfft // 2 - 1]), axes=0).T,
extent=(
np.fft.fftshift(FFTop.f1)[0],
np.fft.fftshift(FFTop.f1)[-1],
FFTop.f2[nfft // 2 - 1],
FFTop.f2[0],
),
)
axs[1].set_title("Reconstructed model in f-k domain")
axs[1].axis("tight")
axs[1].set_xlim(-0.1, 0.1)
axs[1].set_ylim(50, 0)
axs[2].plot(cost, "k", lw=3)
axs[2].set_title("FISTA convergence")
plt.tight_layout()
###############################################################################
# We see how adding prior information to the inversion can help improving the
# estimate of the regularized seismic data. Nevertheless, in both cases the
# reconstructed data is not perfect. A better sparsifying transform could in
# fact be chosen here to be the linear
# :py:class:`pylops.signalprocessing.Radon2D` transform in spite of the
# :py:class:`pylops.FFT2` transform.
npx = 40
pxmax = 1e-3
px = np.linspace(-pxmax, pxmax, npx)
Radop = pylops.signalprocessing.Radon2D(taxis, xaxis, px, engine="numba")
RRop = Rop * Radop
# adjoint
Xadj_fromx = Radop.H * x.ravel()
Xadj_fromx = Xadj_fromx.reshape(npx, par["nt"])
Xadj = RRop.H * y.ravel()
Xadj = Xadj.reshape(npx, par["nt"])
# L1 inverse
xl1, Xl1, cost = pylops.waveeqprocessing.SeismicInterpolation(
y,
par["nx"],
iava,
kind="radon-linear",
spataxis=xaxis,
taxis=taxis,
paxis=px,
centeredh=True,
**dict(niter=50, eps=1e-1)
)
fig, axs = plt.subplots(2, 3, sharey=True, figsize=(12, 7))
axs[0][0].imshow(
x.T, cmap="gray", vmin=-2, vmax=2, extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0])
)
axs[0][0].set_title("Data", fontsize=12)
axs[0][0].axis("tight")
axs[0][1].imshow(
ymask.T,
cmap="gray",
vmin=-2,
vmax=2,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[0][1].set_title("Masked data", fontsize=12)
axs[0][1].axis("tight")
axs[0][2].imshow(
xl1.T,
cmap="gray",
vmin=-2,
vmax=2,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[0][2].set_title("Reconstructed data", fontsize=12)
axs[0][2].axis("tight")
axs[1][0].imshow(
Xadj_fromx.T,
cmap="gray",
vmin=-70,
vmax=70,
extent=(px[0], px[-1], taxis[-1], taxis[0]),
)
axs[1][0].set_title("Adj. Radon on data", fontsize=12)
axs[1][0].axis("tight")
axs[1][1].imshow(
Xadj.T, cmap="gray", vmin=-50, vmax=50, extent=(px[0], px[-1], taxis[-1], taxis[0])
)
axs[1][1].set_title("Adj. Radon on subsampled data", fontsize=12)
axs[1][1].axis("tight")
axs[1][2].imshow(
Xl1.T, cmap="gray", vmin=-0.2, vmax=0.2, extent=(px[0], px[-1], taxis[-1], taxis[0])
)
axs[1][2].set_title("Inverse Radon on subsampled data", fontsize=12)
axs[1][2].axis("tight")
plt.tight_layout()
###############################################################################
# Finally, let's take now a more realistic dataset. We will use once again the
# linear :py:class:`pylops.signalprocessing.Radon2D` transform but we will
# take advantage of the :py:class:`pylops.signalprocessing.Sliding2D` operator
# to perform such a transform locally instead of globally to the entire
# dataset.
inputfile = "../testdata/marchenko/input.npz"
inputdata = np.load(inputfile)
x = inputdata["R"][50, :, ::2]
x = x / np.abs(x).max()
taxis, xaxis = inputdata["t"][::2], inputdata["r"][0]
par = {}
par["nx"], par["nt"] = x.shape
par["dx"] = inputdata["r"][0, 1] - inputdata["r"][0, 0]
par["dt"] = inputdata["t"][1] - inputdata["t"][0]
# add wavelet
wav = inputdata["wav"][::2]
wav_c = np.argmax(wav)
x = np.apply_along_axis(convolve, 1, x, wav, mode="full")
x = x[:, wav_c:][:, : par["nt"]]
# gain
gain = np.tile((taxis**2)[:, np.newaxis], (1, par["nx"])).T
x = x * gain
# subsampling locations
perc_subsampling = 0.5
Nsub = int(np.round(par["nx"] * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(par["nx"]))[:Nsub])
# restriction operator
Rop = pylops.Restriction((par["nx"], par["nt"]), iava, axis=0, dtype="float64")
y = Rop * x.ravel()
xadj = Rop.H * y.ravel()
y = y.reshape(Nsub, par["nt"])
xadj = xadj.reshape(par["nx"], par["nt"])
# apply mask
ymask = Rop.mask(x.ravel())
# sliding windows with radon transform
dx = par["dx"]
nwins = 4
nwin = 27
nover = 3
npx = 31
pxmax = 5e-4
px = np.linspace(-pxmax, pxmax, npx)
dimsd = x.shape
dims = (nwins * npx, dimsd[1])
Op = pylops.signalprocessing.Radon2D(
taxis,
np.linspace(-par["dx"] * nwin // 2, par["dx"] * nwin // 2, nwin),
px,
centeredh=True,
kind="linear",
engine="numba",
)
Slidop = pylops.signalprocessing.Sliding2D(
Op, dims, dimsd, nwin, nover, tapertype="cosine"
)
# adjoint
RSop = Rop * Slidop
Xadj_fromx = Slidop.H * x.ravel()
Xadj_fromx = Xadj_fromx.reshape(npx * nwins, par["nt"])
Xadj = RSop.H * y.ravel()
Xadj = Xadj.reshape(npx * nwins, par["nt"])
# inverse
xl1, Xl1, _ = pylops.waveeqprocessing.SeismicInterpolation(
y,
par["nx"],
iava,
kind="sliding",
spataxis=xaxis,
taxis=taxis,
paxis=px,
nwins=nwins,
nwin=nwin,
nover=nover,
**dict(niter=50, eps=1e-2)
)
fig, axs = plt.subplots(2, 3, sharey=True, figsize=(12, 14))
axs[0][0].imshow(
x.T,
cmap="gray",
vmin=-0.1,
vmax=0.1,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[0][0].set_title("Data")
axs[0][0].axis("tight")
axs[0][1].imshow(
ymask.T,
cmap="gray",
vmin=-0.1,
vmax=0.1,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[0][1].set_title("Masked data")
axs[0][1].axis("tight")
axs[0][2].imshow(
xl1.T,
cmap="gray",
vmin=-0.1,
vmax=0.1,
extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[0][2].set_title("Reconstructed data")
axs[0][2].axis("tight")
axs[1][0].imshow(
Xadj_fromx.T,
cmap="gray",
vmin=-1,
vmax=1,
extent=(px[0], px[-1], taxis[-1], taxis[0]),
)
axs[1][0].set_title("Adjoint Radon on data")
axs[1][0].axis("tight")
axs[1][1].imshow(
Xadj.T,
cmap="gray",
vmin=-0.6,
vmax=0.6,
extent=(px[0], px[-1], taxis[-1], taxis[0]),
)
axs[1][1].set_title("Adjoint Radon on subsampled data")
axs[1][1].axis("tight")
axs[1][2].imshow(
Xl1.T,
cmap="gray",
vmin=-0.03,
vmax=0.03,
extent=(px[0], px[-1], taxis[-1], taxis[0]),
)
axs[1][2].set_title("Inverse Radon on subsampled data")
axs[1][2].axis("tight")
plt.tight_layout()
###############################################################################
# As expected the linear :py:class:`pylops.signalprocessing.Radon2D` is
# able to locally explain events in the input data and leads to a satisfactory
# recovery. Note that increasing the number of iterations and sliding windows
# can further refine the result, especially the accuracy of weak events, as
# shown in this companion
# `notebook <https://github.com/mrava87/pylops_notebooks/blob/master/developement/SeismicInterpolation.ipynb>`_.
| 12,173 | 26.542986 | 112 | py |
pylops | pylops-master/tutorials/ilsm.py | r"""
20. Image Domain Least-squares migration
========================================
Seismic migration is the process by which seismic data are manipulated to create
an image of the subsurface reflectivity.
In one of the previous tutorials, we have seen how the process can be formulated
as an inverse problem, which requires access to a demigration-migration engine.
As performing repeated migrations and demigrations can be very expensive, an
alternative approach to obtain accurate and high-resolution estimate of the
subsurface reflectivity has emerged under the name of image-domain least-squares
migration.
In image-domain least-squares migration, we identify a direct, linear link between
the migrated image :math:`\mathbf{m}` and the sought after
reflectivity :math:`\mathbf{r}`, namely:
.. math::
\mathbf{m} = \mathbf{H} \mathbf{r}
Here :math:`\mathbf{H}` is the Hessian, which can be written as:
.. math::
\mathbf{H} = \mathbf{L}^H \mathbf{L}
where :math:`\mathbf{L}` is the demigration operator, whilst its adjoint
:math:`\mathbf{L}^H` is the migration operator. In other words, we say that the
migrated image can be seen as the result of a pair of demigration/migration of
the reflectivity.
Whilst there exists different ways to estimate :math:`\mathbf{H}`, the approach
that we will be using here entails applying demigration and migration to a special
reflectivity model composed of regularly space scatterers. What we obtain is the
spatially-varying impulse response of the migration operator, where each filter is
also usually referred to as local point spread function (PSF).
Once these PSFs are computed (an operation that requires one migration and one
demigration, much cheaper than what we do in LSM), the migrated image can be deconvolved
using the :py:class:`pylops.signalprocessing.NonStationaryConvolve2D` operator.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(0)
###############################################################################
# To start we create a simple model with 2 interfaces (the same we used in
# the LSM tutorial) and our PSF model with regularly spaced scatterers
# Velocity Model
nx, nz = 81, 60
dx, dz = 4, 4
x, z = np.arange(nx) * dx, np.arange(nz) * dz
v0 = 1000 # initial velocity
kv = 0.0 # gradient
vel = np.outer(np.ones(nx), v0 + kv * z)
# Reflectivity Model
refl = np.zeros((nx, nz))
refl[:, 30] = -1
refl[:, 50] = 0.5
# PSF Reflectivity Model: unit scatterers on a regular grid, used to probe
# the spatially-varying impulse response (local PSFs) of migration
psfrefl = np.zeros((nx, nz))
psfin = (10, 15)  # grid index of the first scatterer in (x, z)
psfend = (-10, -5)  # offset of the last scatterer from the model edges
psfj = (30, 30)  # scatterer spacing in (x, z)
psfx = np.arange(psfin[0], nx + psfend[0], psfj[0])
psfz = np.arange(psfin[1], nz + psfend[1], psfj[1])
Psfx, Psfz = np.meshgrid(psfx, psfz, indexing="ij")
# consistency fix: index the z components with [1] (was [-1]; equivalent for
# these length-2 tuples, but misleading and fragile)
psfrefl[psfin[0] : psfend[0] : psfj[0], psfin[1] : psfend[1] : psfj[1]] = 1
# Receivers
nr = 51
rx = np.linspace(10 * dx, (nx - 10) * dx, nr)
rz = 20 * np.ones(nr)
recs = np.vstack((rx, rz))
dr = recs[0, 1] - recs[0, 0]
# Sources
ns = 51
sx = np.linspace(dx * 10, (nx - 10) * dx, ns)
sz = 10 * np.ones(ns)
sources = np.vstack((sx, sz))
ds = sources[0, 1] - sources[0, 0]
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(10, 5))
axs[0].imshow(vel.T, cmap="summer", extent=(x[0], x[-1], z[-1], z[0]))
axs[0].scatter(recs[0], recs[1], marker="v", s=150, c="b", edgecolors="k")
axs[0].scatter(sources[0], sources[1], marker="*", s=150, c="r", edgecolors="k")
axs[0].axis("tight")
axs[0].set_xlabel("x [m]"), axs[0].set_ylabel("z [m]")
axs[0].set_title("Velocity")
axs[0].set_xlim(x[0], x[-1])
axs[1].imshow(refl.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]))
axs[1].scatter(recs[0], recs[1], marker="v", s=150, c="b", edgecolors="k")
axs[1].scatter(sources[0], sources[1], marker="*", s=150, c="r", edgecolors="k")
axs[1].axis("tight")
axs[1].set_xlabel("x [m]")
axs[1].set_title("Reflectivity")
axs[1].set_xlim(x[0], x[-1])
axs[2].imshow(psfrefl.T, cmap="gray_r", extent=(x[0], x[-1], z[-1], z[0]))
axs[2].scatter(recs[0], recs[1], marker="v", s=150, c="b", edgecolors="k")
axs[2].scatter(sources[0], sources[1], marker="*", s=150, c="r", edgecolors="k")
axs[2].axis("tight")
axs[2].set_xlabel("x [m]")
axs[2].set_title("PSF Reflectivity")
axs[2].set_xlim(x[0], x[-1])
plt.tight_layout()
###############################################################################
# We can now create our Kirchhoff modelling object which we will use to model
# and migrate the data, as well as to model and migrate the PSF model.
nt = 151
dt = 0.004
t = np.arange(nt) * dt
wav, wavt, wavc = pylops.utils.wavelets.ricker(t[:41], f0=20)
kop = pylops.waveeqprocessing.Kirchhoff(
z,
x,
t,
sources,
recs,
v0,
wav,
wavc,
mode="analytic",
dynamic=False,
wavfilter=True,
engine="numba",
)
kopdyn = pylops.waveeqprocessing.Kirchhoff(
z,
x,
t,
sources,
recs,
v0,
wav,
wavc,
mode="analytic",
dynamic=True,
wavfilter=True,
aperture=2,
angleaperture=50,
engine="numba",
)
d = kop @ refl
mmig = kopdyn.H @ d
dpsf = kop @ psfrefl
mmigpsf = kopdyn.H @ dpsf
fig, axs = plt.subplots(1, 2, figsize=(10, 6))
axs[0].imshow(
dpsf[ns // 2, :, :].T,
extent=(rx[0], rx[-1], t[-1], t[0]),
cmap="gray",
vmin=-200,
vmax=200,
)
axs[0].axis("tight")
axs[0].set_xlabel("x [m]"), axs[0].set_ylabel("t [m]")
axs[0].set_title(r"$d_{psf}$")
axs[1].imshow(
mmigpsf.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-200, vmax=200
)
axs[1].scatter(Psfx.ravel() * dx, Psfz.ravel() * dz, c="r")
axs[1].set_xlabel("x [m]"), axs[1].set_ylabel("z [m]")
axs[1].set_title(r"$m_{psf}$")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# We can now extract the local PSFs and create the 2-dimensional
# non-stationary filtering operator
psfsize = (21, 21)
psfs = np.zeros((len(psfx), len(psfz), *psfsize))
for ipx, px in enumerate(psfx):
for ipz, pz in enumerate(psfz):
psfs[ipx, ipz] = mmigpsf[
int(px - psfsize[0] // 2) : int(px + psfsize[0] // 2 + 1),
int(pz - psfsize[1] // 2) : int(pz + psfsize[1] // 2 + 1),
]
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
axs[0].imshow(
psfs[:, 0].reshape(len(psfx) * psfsize[0], psfsize[1]).T,
cmap="gray",
vmin=-200,
vmax=200,
)
axs[0].set_title(r"$m_{psf}$ iz=0")
axs[0].axis("tight")
axs[1].imshow(
psfs[:, 1].reshape(len(psfx) * psfsize[0], psfsize[1]).T,
cmap="gray",
vmin=-200,
vmax=200,
)
axs[1].set_title(r"$m_{psf}$ iz=1")
axs[1].axis("tight")
plt.tight_layout()
Cop = pylops.signalprocessing.NonStationaryConvolve2D(
hs=psfs, ihx=psfx, ihz=psfz, dims=(nx, nz), engine="numba"
)
mmigpsf = Cop @ refl
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(
mmig.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-1e3, vmax=1e3
)
axs[0].set_title(r"$m_{mig}$")
axs[0].axis("tight")
axs[1].imshow(
mmigpsf.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-1e3, vmax=1e3
)
axs[1].set_title(r"$m_{mig, psf}$")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# Finally, we are ready to invert our seismic image for its corresponding
# reflectivity using the :py:func:`pylops.optimization.sparsity.fista` solver.
minv, _, resnorm = pylops.optimization.sparsity.fista(
Cop, mmig.ravel(), eps=1e5, niter=100, eigsdict=dict(niter=5, tol=1e-2), show=True
)
minv = minv.reshape(nx, nz)
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(
mmig.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-500, vmax=500
)
axs[0].set_title(r"$m_{mig}$")
axs[0].axis("tight")
axs[1].imshow(minv.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-1, vmax=1)
axs[1].set_title(r"$m_{inv}$")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# For a more advanced set of examples of both reflectivity and impedance
# image-domain LSM head over to this
# `notebook <https://github.com/mrava87/pylops_notebooks/blob/master/developement/LeastSquaresMigration_imagedomainmarmousi.ipynb>`_.
| 8,191 | 30.875486 | 133 | py |
pylops | pylops-master/tutorials/bayesian.py | r"""
04. Bayesian Inversion
======================
This tutorial focuses on Bayesian inversion, a special type of inverse problem
that aims at incorporating prior information in terms of model and data
probabilities in the inversion process.
In this case we will be dealing with the same problem that we discussed in
:ref:`sphx_glr_tutorials_solvers.py`, but instead of defining ad-hoc
regularization or preconditioning terms we parametrize and model our input
signal in the frequency domain in a probabilistic fashion: the central
frequency, amplitude and phase of the three sinusoids have gaussian
distributions as follows:
.. math::
X(f) = \sum_{i=1}^3 a_i e^{j \phi_i} \delta(f - f_i)
where :math:`f_i \sim N(f_{0,i}, \sigma_{f,i})`,
:math:`a_i \sim N(a_{0,i}, \sigma_{a,i})`, and
:math:`\phi_i \sim N(\phi_{0,i}, \sigma_{\phi,i})`.
Based on the above definition, we construct some prior models in the frequency
domain, convert each of them to the time domain and use such an ensemble
to estimate the prior mean :math:`\mu_\mathbf{x}` and model
covariance :math:`\mathbf{C_x}`.
We then create our data by sampling the true signal at certain locations
and solve the resconstruction problem within a Bayesian framework. Since we are
assuming gaussianity in our priors, the equation to obtain the posterion mean
can be derived analytically:
.. math::
\mathbf{x} = \mathbf{x_0} + \mathbf{C}_x \mathbf{R}^T
(\mathbf{R} \mathbf{C}_x \mathbf{R}^T + \mathbf{C}_y)^{-1} (\mathbf{y} -
\mathbf{R} \mathbf{x_0})
"""
import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 2
import numpy as np
from scipy.sparse.linalg import lsqr
import pylops
plt.close("all")
np.random.seed(10)
###############################################################################
# Let's start by creating our true model and prior realizations
def prior_realization(f0, a0, phi0, sigmaf, sigmaa, sigmaphi, dt, nt, nfft):
    """Draw one time-domain signal realization from Gaussian priors.

    Each sinusoid is parametrized by a central frequency, an amplitude and
    a phase, sampled from normal distributions with the supplied means
    (``f0``, ``a0``, ``phi0``) and standard deviations (``sigmaf``,
    ``sigmaa``, ``sigmaphi``). The spectrum is assembled on a real-FFT grid
    of size ``nfft`` (sampling ``dt``) and transformed back to ``nt`` time
    samples.
    """
    freqs = np.fft.rfftfreq(nfft, dt)
    df = freqs[1] - freqs[0]
    # sample priors: frequencies first, then amplitudes, then phases, so the
    # random-draw order (and thus results under a fixed seed) is unchanged
    ifreqs = []
    for fmean, fsigma in zip(f0, sigmaf):
        ifreqs.append(int(np.random.normal(fmean, fsigma) / df))
    amps = []
    for amean, asigma in zip(a0, sigmaa):
        amps.append(np.random.normal(amean, asigma))
    phis = []
    for phimean, phisigma in zip(phi0, sigmaphi):
        phis.append(np.random.normal(phimean, phisigma))
    # place the sampled components on the positive-frequency axis
    X = np.zeros(nfft // 2 + 1, dtype="complex128")
    X[ifreqs] = (
        np.array(amps).squeeze() * np.exp(1j * np.deg2rad(np.array(phis))).squeeze()
    )
    # back to the time domain via the adjoint of the real-valued FFT operator
    FFTop = pylops.signalprocessing.FFT(nt, nfft=nfft, real=True)
    return FFTop.H * X
# Priors
nreals = 100
f0 = [5, 3, 8]
sigmaf = [0.5, 1.0, 0.6]
a0 = [1.0, 1.0, 1.0]
sigmaa = [0.1, 0.5, 0.6]
phi0 = [-90.0, 0.0, 0.0]
sigmaphi = [0.1, 0.2, 0.4]
sigmad = 1e-2
# Prior models
nt = 200
nfft = 2**11
dt = 0.004
t = np.arange(nt) * dt
xs = np.array(
[
prior_realization(f0, a0, phi0, sigmaf, sigmaa, sigmaphi, dt, nt, nfft)
for _ in range(nreals)
]
)
# True model (taken as one possible realization)
x = prior_realization(f0, a0, phi0, [0, 0, 0], [0, 0, 0], [0, 0, 0], dt, nt, nfft)
###############################################################################
# We have now a set of prior models in time domain. We can easily use sample
# statistics to estimate the prior mean and covariance. For the covariance, we
# perform a second step where we average values around the main
# diagonal for each row and find a smooth, compact filter that we use to
# define a convolution linear operator that mimics the action of the covariance
# matrix on a vector
x0 = np.average(xs, axis=0)
Cm = ((xs - x0).T @ (xs - x0)) / nreals
N = 30  # length of decorrelation
diags = np.array([Cm[i, i - N : i + N + 1] for i in range(N, nt - N)])
diag_ave = np.average(diags, axis=0)
# add a taper at the end to avoid edge effects
diag_ave *= np.hamming(2 * N + 1)
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
ax.plot(t, xs.T, "r", lw=1)
ax.plot(t, x0, "g", lw=4)
ax.plot(t, x, "k", lw=4)
ax.set_title("Prior realizations and mean")
ax.set_xlim(0, 0.8)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
im = ax1.imshow(
Cm, interpolation="nearest", cmap="seismic", extent=(t[0], t[-1], t[-1], t[0])
)
ax1.set_title(r"$\mathbf{C}_m^{prior}$")
ax1.axis("tight")
ax2.plot(np.arange(-N, N + 1) * dt, diags.T, "--r", lw=1)
ax2.plot(np.arange(-N, N + 1) * dt, diag_ave, "k", lw=4)
ax2.set_title("Averaged covariance 'filter'")
plt.tight_layout()
###############################################################################
# Let's define now the sampling operator as well as create our covariance
# matrices in terms of linear operators. This may not be strictly necessary
# here but shows how even Bayesian-type of inversion can very easily scale to
# large model and data spaces.
# Sampling operator
perc_subsampling = 0.2
ntsub = int(np.round(nt * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(nt))[:ntsub])
iava[-1] = nt - 1 # assume we have the last sample to avoid instability
Rop = pylops.Restriction(nt, iava, dtype="float64")
# Covariance operators
Cm_op = pylops.signalprocessing.Convolve1D(nt, diag_ave, offset=N)
Cd_op = sigmad**2 * pylops.Identity(ntsub)
###############################################################################
# We model now our data and add noise that respects our prior definition
n = np.random.normal(0, sigmad, nt)
y = Rop * x
yn = Rop * (x + n)
ymask = Rop.mask(x)
ynmask = Rop.mask(x + n)
###############################################################################
# First we apply the Bayesian inversion equation
xbayes = x0 + Cm_op * Rop.H * (
lsqr(Rop * Cm_op * Rop.H + Cd_op, yn - Rop * x0, iter_lim=400)[0]
)
# Visualize
fig, ax = plt.subplots(1, 1, figsize=(12, 5))
ax.plot(t, x, "k", lw=6, label="true")
ax.plot(t, ymask, ".k", ms=25, label="available samples")
ax.plot(t, ynmask, ".r", ms=25, label="available noisy samples")
ax.plot(t, xbayes, "r", lw=3, label="bayesian inverse")
ax.legend()
ax.set_title("Signal")
ax.set_xlim(0, 0.8)
plt.tight_layout()
###############################################################################
# So far we have been able to estimate our posterion mean. What about its
# uncertainties (i.e., posterion covariance)?
#
# In real-life applications it is very difficult (if not impossible)
# to directly compute the posterior covariance matrix. It is much more
# useful to create a set of models that sample the posterion probability.
# We can do that by solving our problem several times using different prior
# realizations as starting guesses:
# Sample the posterior by re-solving the inverse problem starting from 30
# different prior realizations used as initial guesses
xpost = [
    x0
    + Cm_op
    * Rop.H
    * (lsqr(Rop * Cm_op * Rop.H + Cd_op, yn - Rop * x0, iter_lim=400)[0])
    for x0 in xs[:30]
]
xpost = np.array(xpost)
x0post = np.average(xpost, axis=0)
# bugfix: normalize the sample covariance by the number of posterior samples
# actually drawn (len(xpost) == 30), not by the number of prior realizations
# (nreals == 100), which underestimated the posterior variances
Cm_post = ((xpost - x0post).T @ (xpost - x0post)) / len(xpost)
# Visualize
fig, ax = plt.subplots(1, 1, figsize=(12, 5))
ax.plot(t, x, "k", lw=6, label="true")
ax.plot(t, xpost.T, "--r", lw=1)
ax.plot(t, x0post, "r", lw=3, label="bayesian inverse")
ax.plot(t, ymask, ".k", ms=25, label="available samples")
ax.plot(t, ynmask, ".r", ms=25, label="available noisy samples")
ax.legend()
ax.set_title("Signal")
ax.set_xlim(0, 0.8)
fig, ax = plt.subplots(1, 1, figsize=(5, 4))
im = ax.imshow(
Cm_post, interpolation="nearest", cmap="seismic", extent=(t[0], t[-1], t[-1], t[0])
)
ax.set_title(r"$\mathbf{C}_m^{posterior}$")
ax.axis("tight")
plt.tight_layout()
###############################################################################
# Note that here we have been able to compute a sample posterior covariance
# from its estimated samples. By displaying it we can see how both the overall
# variances and the correlation between different parameters have become
# narrower compared to their prior counterparts.
| 7,878 | 34.490991 | 87 | py |
pylops | pylops-master/tutorials/poststack.py | r"""
07. Post-stack inversion
========================
Estimating subsurface properties from band-limited seismic data represents an
important task for geophysical subsurface characterization.
In this tutorial, the :py:class:`pylops.avo.poststack.PoststackLinearModelling`
operator is used for modelling of both 1d and 2d synthetic post-stack seismic
data from a profile or 2d model of the subsurface acoustic impedence.
.. math::
d(t, \theta=0) = \frac{1}{2} w(t) * \frac{\mathrm{d}\ln \text{AI}(t)}{\mathrm{d}t}
where :math:`\text{AI}(t)` is the acoustic impedance profile and :math:`w(t)` is
the time domain seismic wavelet. In compact form:
.. math::
\mathbf{d}= \mathbf{W} \mathbf{D} \mathbf{ai}
where :math:`\mathbf{W}` is a convolution operator, :math:`\mathbf{D}` is a
first derivative operator, and :math:`\mathbf{ai}` is the input model.
Subsequently the acoustic impedance model is estimated via the
:py:class:`pylops.avo.poststack.PoststackInversion` module. A two-steps
inversion strategy is finally presented to deal with the case of noisy data.
"""
import matplotlib.pyplot as plt

# sphinx_gallery_thumbnail_number = 4
import numpy as np
from scipy.signal import filtfilt

import pylops
from pylops.utils.wavelets import ricker

plt.close("all")
np.random.seed(10)  # fixed seed: the synthetic profiles and noise are reproducible

###############################################################################
# Let's start with a 1d example. A synthetic profile of acoustic impedance
# is created and data is modelled using both the dense and linear operator
# version of :py:class:`pylops.avo.poststack.PoststackLinearModelling`
# operator.

# model: random-perturbed velocity/density trends with a step at sample 131
nt0 = 301
dt0 = 0.004
t0 = np.arange(nt0) * dt0
vp = 1200 + np.arange(nt0) + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 80, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 30, nt0))
vp[131:] += 500
rho[131:] += 100
m = np.log(vp * rho)  # model is the natural log of acoustic impedance

# smooth model (moving-average filtered version of m, used as background)
nsmooth = 100
mback = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m)

# wavelet
ntwav = 41
wav, twav, wavc = ricker(t0[: ntwav // 2 + 1], 20)

# dense operator
PPop_dense = pylops.avo.poststack.PoststackLinearModelling(
    wav / 2, nt0=nt0, explicit=True
)

# lop operator
PPop = pylops.avo.poststack.PoststackLinearModelling(wav / 2, nt0=nt0)

# data
d_dense = PPop_dense * m.ravel()
d = PPop * m

# add noise
dn_dense = d_dense + np.random.normal(0, 2e-2, d_dense.shape)

###############################################################################
# We can now estimate the acoustic profile from band-limited data using either
# the dense operator or linear operator.

# solve dense
# NOTE(review): `d` (lop-modelled data) is inverted here while `d_dense` is
# inverted in the lop branch below -- the two vectors are numerically
# equivalent so results are unaffected, but the pairing looks swapped
# relative to the comments; confirm against upstream tutorial.
minv_dense = pylops.avo.poststack.PoststackInversion(
    d, wav / 2, m0=mback, explicit=True, simultaneous=False
)[0]

# solve lop
minv = pylops.avo.poststack.PoststackInversion(
    d_dense,
    wav / 2,
    m0=mback,
    explicit=False,
    simultaneous=False,
    **dict(iter_lim=2000)
)[0]

# solve noisy (spatial regularization epsR + damping stabilize the inverse)
mn = pylops.avo.poststack.PoststackInversion(
    dn_dense, wav / 2, m0=mback, explicit=True, epsR=1e0, **dict(damp=1e-1)
)[0]

fig, axs = plt.subplots(1, 2, figsize=(6, 7), sharey=True)
axs[0].plot(d_dense, t0, "k", lw=4, label="Dense")
axs[0].plot(d, t0, "--r", lw=2, label="Lop")
axs[0].plot(dn_dense, t0, "-.g", lw=2, label="Noisy")
axs[0].set_title("Data")
axs[0].invert_yaxis()
axs[0].axis("tight")
axs[0].legend(loc=1)
axs[1].plot(m, t0, "k", lw=4, label="True")
axs[1].plot(mback, t0, "--b", lw=4, label="Back")
axs[1].plot(minv_dense, t0, "--m", lw=2, label="Inv Dense")
axs[1].plot(minv, t0, "--r", lw=2, label="Inv Lop")
axs[1].plot(mn, t0, "--g", lw=2, label="Inv Noisy")
axs[1].set_title("Model")
axs[1].axis("tight")
axs[1].legend(loc=1)
plt.tight_layout()

###############################################################################
# We see how inverting a dense matrix is in this case faster than solving
# for the linear operator (a good estimate of the model is in fact obtained
# only after 2000 iterations of lsqr). Nevertheless, having a linear operator
# is useful when we deal with larger dimensions (2d or 3d) and we want to
# couple our modelling operator with different types of spatial regularizations
# or preconditioning.
#
# Before we move onto a 2d example, let's consider the case of non-stationary
# wavelet and see how we can easily use the same routines in this case

# wavelet: one Ricker per time sample, with dominant frequency decreasing in time
ntwav = 41
f0s = np.flip(np.arange(nt0) * 0.05 + 3)
wavs = np.array([ricker(t0[:ntwav], f0)[0] for f0 in f0s])
wavc = np.argmax(wavs[0])

plt.figure(figsize=(5, 4))
plt.imshow(wavs.T, cmap="gray", extent=(t0[0], t0[-1], t0[ntwav], -t0[ntwav]))
plt.xlabel("t")
plt.title("Wavelets")
plt.axis("tight")

# operator
PPop = pylops.avo.poststack.PoststackLinearModelling(wavs / 2, nt0=nt0, explicit=True)

# data
d = PPop * m

# solve
minv = pylops.avo.poststack.PoststackInversion(
    d, wavs / 2, m0=mback, explicit=True, **dict(cond=1e-10)
)[0]

fig, axs = plt.subplots(1, 2, figsize=(6, 7), sharey=True)
axs[0].plot(d, t0, "k", lw=4)
axs[0].set_title("Data")
axs[0].invert_yaxis()
axs[0].axis("tight")
axs[1].plot(m, t0, "k", lw=4, label="True")
axs[1].plot(mback, t0, "--b", lw=4, label="Back")
axs[1].plot(minv, t0, "--r", lw=2, label="Inv")
axs[1].set_title("Model")
axs[1].axis("tight")
axs[1].legend(loc=1)
plt.tight_layout()

###############################################################################
# We move now to a 2d example. First of all the model is loaded and
# data generated.

# model
inputfile = "../testdata/avo/poststack_model.npz"
model = np.load(inputfile)
m = np.log(model["model"][:, ::3])  # subsample laterally by 3
x, z = model["x"][::3] / 1000.0, model["z"] / 1000.0
nx, nz = len(x), len(z)

# smooth model (separable moving average along depth then lateral axis)
nsmoothz, nsmoothx = 60, 50
mback = filtfilt(np.ones(nsmoothz) / float(nsmoothz), 1, m, axis=0)
mback = filtfilt(np.ones(nsmoothx) / float(nsmoothx), 1, mback, axis=1)

# dense operator
PPop_dense = pylops.avo.poststack.PoststackLinearModelling(
    wav / 2, nt0=nz, spatdims=nx, explicit=True
)

# lop operator
PPop = pylops.avo.poststack.PoststackLinearModelling(wav / 2, nt0=nz, spatdims=nx)

# data
d = (PPop_dense * m.ravel()).reshape(nz, nx)
n = np.random.normal(0, 1e-1, d.shape)
dn = d + n

###############################################################################
# Finally we perform 4 different inversions:
#
# * trace-by-trace inversion with explicit solver and dense operator with
#   noise-free data
#
# * trace-by-trace inversion with explicit solver and dense operator
#   with noisy data
#
# * multi-trace regularized inversion with iterative solver and linear operator
#   using the result of trace-by-trace inversion as starting guess
#
#   .. math::
#        J = ||\Delta \mathbf{d} - \mathbf{W} \Delta \mathbf{ai}||_2 +
#        \epsilon_\nabla ^2 ||\nabla \mathbf{ai}||_2
#
#   where :math:`\Delta \mathbf{d}=\mathbf{d}-\mathbf{W}\mathbf{AI_0}` is
#   the residual data
#
# * multi-trace blocky inversion with iterative solver and linear operator

# dense inversion with noise-free data
minv_dense = pylops.avo.poststack.PoststackInversion(
    d, wav / 2, m0=mback, explicit=True, simultaneous=False
)[0]

# dense inversion with noisy data
minv_dense_noisy = pylops.avo.poststack.PoststackInversion(
    dn, wav / 2, m0=mback, explicit=True, epsI=4e-2, simultaneous=False
)[0]

# spatially regularized lop inversion with noisy data
minv_lop_reg = pylops.avo.poststack.PoststackInversion(
    dn,
    wav / 2,
    m0=minv_dense_noisy,
    explicit=False,
    epsR=5e1,
    **dict(damp=np.sqrt(1e-4), iter_lim=80)
)[0]

# blockiness promoting inversion with noisy data (L1 on spatial gradients)
minv_lop_blocky = pylops.avo.poststack.PoststackInversion(
    dn,
    wav / 2,
    m0=mback,
    explicit=False,
    epsR=[0.4],
    epsRL1=[0.1],
    **dict(mu=0.1, niter_outer=5, niter_inner=10, iter_lim=5, damp=1e-3)
)[0]

fig, axs = plt.subplots(2, 4, figsize=(15, 9))
axs[0][0].imshow(d, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-0.4, vmax=0.4)
axs[0][0].set_title("Data")
axs[0][0].axis("tight")
axs[0][1].imshow(
    dn, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-0.4, vmax=0.4
)
axs[0][1].set_title("Noisy Data")
axs[0][1].axis("tight")
axs[0][2].imshow(
    m,
    cmap="gist_rainbow",
    extent=(x[0], x[-1], z[-1], z[0]),
    vmin=m.min(),
    vmax=m.max(),
)
axs[0][2].set_title("Model")
axs[0][2].axis("tight")
axs[0][3].imshow(
    mback,
    cmap="gist_rainbow",
    extent=(x[0], x[-1], z[-1], z[0]),
    vmin=m.min(),
    vmax=m.max(),
)
axs[0][3].set_title("Smooth Model")
axs[0][3].axis("tight")
axs[1][0].imshow(
    minv_dense,
    cmap="gist_rainbow",
    extent=(x[0], x[-1], z[-1], z[0]),
    vmin=m.min(),
    vmax=m.max(),
)
axs[1][0].set_title("Noise-free Inversion")
axs[1][0].axis("tight")
axs[1][1].imshow(
    minv_dense_noisy,
    cmap="gist_rainbow",
    extent=(x[0], x[-1], z[-1], z[0]),
    vmin=m.min(),
    vmax=m.max(),
)
axs[1][1].set_title("Trace-by-trace Noisy Inversion")
axs[1][1].axis("tight")
axs[1][2].imshow(
    minv_lop_reg,
    cmap="gist_rainbow",
    extent=(x[0], x[-1], z[-1], z[0]),
    vmin=m.min(),
    vmax=m.max(),
)
axs[1][2].set_title("Regularized Noisy Inversion - lop ")
axs[1][2].axis("tight")
axs[1][3].imshow(
    minv_lop_blocky,
    cmap="gist_rainbow",
    extent=(x[0], x[-1], z[-1], z[0]),
    vmin=m.min(),
    vmax=m.max(),
)
axs[1][3].set_title("Blocky Noisy Inversion - lop ")
axs[1][3].axis("tight")

# Single-trace comparison at the central lateral position
fig, ax = plt.subplots(1, 1, figsize=(3, 7))
ax.plot(m[:, nx // 2], z, "k", lw=4, label="True")
ax.plot(mback[:, nx // 2], z, "--r", lw=4, label="Back")
ax.plot(minv_dense[:, nx // 2], z, "--b", lw=2, label="Inv Dense")
ax.plot(minv_dense_noisy[:, nx // 2], z, "--m", lw=2, label="Inv Dense noisy")
ax.plot(minv_lop_reg[:, nx // 2], z, "--g", lw=2, label="Inv Lop regularized")
ax.plot(minv_lop_blocky[:, nx // 2], z, "--y", lw=2, label="Inv Lop blocky")
ax.set_title("Model")
ax.invert_yaxis()
ax.axis("tight")
ax.legend()
plt.tight_layout()

###############################################################################
# That's almost it. If you wonder how this can be applied to real data,
# head over to the following `notebook
# <https://github.com/equinor/segyio-notebooks/blob/master/notebooks/pylops/01_seismic_inversion.ipynb>`_
# where the open-source `segyio <https://github.com/equinor/segyio>`_ library
# is used alongside pylops to create an end-to-end open-source seismic
# inversion workflow with SEG-Y input data.
| 10,369 | 30.141141 | 105 | py |
pylops | pylops-master/tutorials/wavefielddecomposition.py | r"""
14. Seismic wavefield decomposition
===================================
Multi-component seismic data can be decomposed
in their up- and down-going constituents in a purely data driven fashion.
This task can be accurately achieved by linearly combining the input pressure
and particle velocity data in the frequency-wavenumber described in details in
:func:`pylops.waveeqprocessing.UpDownComposition2D` and
:func:`pylops.waveeqprocessing.WavefieldDecomposition`.
In this tutorial we will consider a simple synthetic data composed of six
events (three up-going and three down-going). We will first combine them to
create pressure and particle velocity data and then show how we can retrieve
their directional constituents both by directly combining the input data
as well as by setting an inverse problem. The latter approach proves vital in
case of spatial aliasing, as applying a simple scaled summation in the
frequency-wavenumber would result in sub-optimal decomposition due to the
superposition of different frequency-wavenumber pairs at some (aliased)
locations.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import filtfilt

import pylops
from pylops.utils.seismicevents import hyperbolic2d, makeaxis
from pylops.utils.wavelets import ricker

np.random.seed(0)
plt.close("all")

###############################################################################
# Let's first create the input up- and down-going wavefields
par = {"ox": -220, "dx": 5, "nx": 89, "ot": 0, "dt": 0.004, "nt": 200, "f0": 40}

# down-going events arrive 0.04s after the up-going ones
t0_plus = np.array([0.2, 0.5, 0.7])
t0_minus = t0_plus + 0.04
vrms = np.array([1400.0, 1500.0, 2000.0])
amp = np.array([1.0, -0.6, 0.5])
vel_sep = 1000.0  # velocity at separation level
rho_sep = 1000.0  # density at separation level

# Create axis
t, t2, x, y = makeaxis(par)

# Create wavelet
wav = ricker(t[:41], f0=par["f0"])[0]

# Create data (three hyperbolic events per direction)
_, p_minus = hyperbolic2d(x, t, t0_minus, vrms, amp, wav)
_, p_plus = hyperbolic2d(x, t, t0_plus, vrms, amp, wav)

###############################################################################
# We can now combine them to create pressure and particle velocity data
critical = 1.1
ntaper = 51
nfft = 2**10

# 2d fft operator
FFTop = pylops.signalprocessing.FFT2D(
    dims=[par["nx"], par["nt"]], nffts=[nfft, nfft], sampling=[par["dx"], par["dt"]]
)

# obliquity factor: rho * |omega| / k_z, masked beyond the critical angle and
# smoothed with a moving average along both f-k axes to avoid sharp edges
[Kx, F] = np.meshgrid(FFTop.f1, FFTop.f2, indexing="ij")
k = F / vel_sep
Kz = np.sqrt((k**2 - Kx**2).astype(np.complex128))
Kz[np.isnan(Kz)] = 0
OBL = rho_sep * (np.abs(F) / Kz)
OBL[Kz == 0] = 0
mask = np.abs(Kx) < critical * np.abs(F) / vel_sep
OBL *= mask
OBL = filtfilt(np.ones(ntaper) / float(ntaper), 1, OBL, axis=0)
OBL = filtfilt(np.ones(ntaper) / float(ntaper), 1, OBL, axis=1)

# composition operator
UPop = pylops.waveeqprocessing.UpDownComposition2D(
    par["nt"],
    par["nx"],
    par["dt"],
    par["dx"],
    rho_sep,
    vel_sep,
    nffts=(nfft, nfft),
    critical=critical * 100.0,
    ntaper=ntaper,
    dtype="complex128",
)

# wavefield modelling: stack [p_plus; p_minus] -> [p; vz]
d = UPop * np.concatenate((p_plus.ravel(), p_minus.ravel())).ravel()
d = np.real(d.reshape(2 * par["nx"], par["nt"]))
p, vz = d[: par["nx"]], d[par["nx"] :]

# obliquity scaled vz
VZ = FFTop * vz.ravel()
VZ = VZ.reshape(nfft, nfft)
VZ_obl = OBL * VZ
vz_obl = FFTop.H * VZ_obl.ravel()
vz_obl = np.real(vz_obl.reshape(par["nx"], par["nt"]))

fig, axs = plt.subplots(1, 4, figsize=(10, 5))
axs[0].imshow(
    p.T,
    aspect="auto",
    vmin=-1,
    vmax=1,
    interpolation="nearest",
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_title(r"$p$", fontsize=15)
axs[0].set_xlabel("x")
axs[0].set_ylabel("t")
axs[1].imshow(
    vz_obl.T,
    aspect="auto",
    vmin=-1,
    vmax=1,
    interpolation="nearest",
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[1].set_title(r"$v_z^{obl}$", fontsize=15)
axs[1].set_xlabel("x")
axs[1].set_ylabel("t")
axs[2].imshow(
    p_plus.T,
    aspect="auto",
    vmin=-1,
    vmax=1,
    interpolation="nearest",
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[2].set_title(r"$p^+$", fontsize=15)
axs[2].set_xlabel("x")
axs[2].set_ylabel("t")
axs[3].imshow(
    p_minus.T,
    aspect="auto",
    interpolation="nearest",
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
    vmin=-1,
    vmax=1,
)
axs[3].set_title(r"$p^-$", fontsize=15)
axs[3].set_xlabel("x")
axs[3].set_ylabel("t")
plt.tight_layout()

###############################################################################
# Wavefield separation is first performed using the analytical expression
# for combining pressure and particle velocity data in the wavenumber-frequency
# domain
pup_sep, pdown_sep = pylops.waveeqprocessing.WavefieldDecomposition(
    p,
    vz,
    par["nt"],
    par["nx"],
    par["dt"],
    par["dx"],
    rho_sep,
    vel_sep,
    nffts=(nfft, nfft),
    kind="analytical",
    critical=critical * 100,
    ntaper=ntaper,
    dtype="complex128",
)
fig = plt.figure(figsize=(12, 5))
axs0 = plt.subplot2grid((2, 5), (0, 0), rowspan=2)
axs1 = plt.subplot2grid((2, 5), (0, 1), rowspan=2)
axs2 = plt.subplot2grid((2, 5), (0, 2), colspan=3)
axs3 = plt.subplot2grid((2, 5), (1, 2), colspan=3)
axs0.imshow(
    pup_sep.T, cmap="gray", vmin=-1, vmax=1, extent=(x.min(), x.max(), t.max(), t.min())
)
axs0.set_title(r"$p^-$ analytical")
axs0.axis("tight")
axs1.imshow(
    pdown_sep.T,
    cmap="gray",
    vmin=-1,
    vmax=1,
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs1.set_title(r"$p^+$ analytical")
axs1.axis("tight")
axs2.plot(t, p[par["nx"] // 2], "r", lw=2, label=r"$p$")
axs2.plot(t, vz_obl[par["nx"] // 2], "--b", lw=2, label=r"$v_z^{obl}$")
axs2.set_ylim(-1, 1)
axs2.set_title("Data at x=%.2f" % x[par["nx"] // 2])
axs2.set_xlabel("t [s]")
axs2.legend()
axs3.plot(t, pup_sep[par["nx"] // 2], "r", lw=2, label=r"$p^-$ ana")
axs3.plot(t, pdown_sep[par["nx"] // 2], "--b", lw=2, label=r"$p^+$ ana")
axs3.set_title("Separated wavefields at x=%.2f" % x[par["nx"] // 2])
axs3.set_xlabel("t [s]")
axs3.set_ylim(-1, 1)
axs3.legend()
plt.tight_layout()

###############################################################################
# We repeat the same exercise but this time we invert the composition operator
# :func:`pylops.waveeqprocessing.UpDownComposition2D`
pup_inv, pdown_inv = pylops.waveeqprocessing.WavefieldDecomposition(
    p,
    vz,
    par["nt"],
    par["nx"],
    par["dt"],
    par["dx"],
    rho_sep,
    vel_sep,
    nffts=(nfft, nfft),
    kind="inverse",
    critical=critical * 100,
    ntaper=ntaper,
    scaling=1.0 / vz.max(),
    dtype="complex128",
    **dict(damp=1e-10, iter_lim=20)
)
fig = plt.figure(figsize=(12, 5))
axs0 = plt.subplot2grid((2, 5), (0, 0), rowspan=2)
axs1 = plt.subplot2grid((2, 5), (0, 1), rowspan=2)
axs2 = plt.subplot2grid((2, 5), (0, 2), colspan=3)
axs3 = plt.subplot2grid((2, 5), (1, 2), colspan=3)
axs0.imshow(
    pup_inv.T, cmap="gray", vmin=-1, vmax=1, extent=(x.min(), x.max(), t.max(), t.min())
)
axs0.set_title(r"$p^-$ inverse")
axs0.axis("tight")
axs1.imshow(
    pdown_inv.T,
    cmap="gray",
    vmin=-1,
    vmax=1,
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs1.set_title(r"$p^+$ inverse")
axs1.axis("tight")
axs2.plot(t, p[par["nx"] // 2], "r", lw=2, label=r"$p$")
axs2.plot(t, vz_obl[par["nx"] // 2], "--b", lw=2, label=r"$v_z^{obl}$")
axs2.set_ylim(-1, 1)
axs2.set_title("Data at x=%.2f" % x[par["nx"] // 2])
axs2.set_xlabel("t [s]")
axs2.legend()
axs3.plot(t, pup_inv[par["nx"] // 2], "r", lw=2, label=r"$p^-$ inv")
axs3.plot(t, pdown_inv[par["nx"] // 2], "--b", lw=2, label=r"$p^+$ inv")
axs3.set_title("Separated wavefields at x=%.2f" % x[par["nx"] // 2])
axs3.set_xlabel("t [s]")
axs3.set_ylim(-1, 1)
axs3.legend()
plt.tight_layout()

###############################################################################
# The up- and down-going constituents have been successfully separated in both
# cases. Finally, we use the
# :func:`pylops.waveeqprocessing.UpDownComposition2D` operator to reconstruct
# the particle velocity wavefield from its up- and down-going pressure
# constituents
PtoVop = pylops.waveeqprocessing.PressureToVelocity(
    par["nt"],
    par["nx"],
    par["dt"],
    par["dx"],
    rho_sep,
    vel_sep,
    nffts=(nfft, nfft),
    critical=critical * 100.0,
    ntaper=ntaper,
    topressure=False,
)

# vz = vz_down - vz_up (opposite vertical propagation directions)
vdown_rec = (PtoVop * pdown_inv.ravel()).reshape(par["nx"], par["nt"])
vup_rec = (PtoVop * pup_inv.ravel()).reshape(par["nx"], par["nt"])
vz_rec = np.real(vdown_rec - vup_rec)

fig, axs = plt.subplots(1, 3, figsize=(13, 6))
axs[0].imshow(
    vz.T,
    cmap="gray",
    vmin=-1e-6,
    vmax=1e-6,
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_title(r"$vz$")
axs[0].axis("tight")
axs[1].imshow(
    vz_rec.T, cmap="gray", vmin=-1e-6, vmax=1e-6, extent=(x.min(), x.max(), t[-1], t[0])
)
axs[1].set_title(r"$vz rec$")
axs[1].axis("tight")
axs[2].imshow(
    vz.T - vz_rec.T,
    cmap="gray",
    vmin=-1e-6,
    vmax=1e-6,
    extent=(x.min(), x.max(), t[-1], t[0]),
)
axs[2].set_title(r"$error$")
axs[2].axis("tight")
plt.tight_layout()

###############################################################################
# To see more examples, including applying wavefield separation and
# regularization simultaneously, as well as 3D examples, head over to
# the following notebooks:
# `notebook1 <https://github.com/mrava87/pylops_notebooks/blob/master/developement/WavefieldSeparation.ipynb>`_
# and `notebook2 <https://github.com/mrava87/pylops_notebooks/blob/master/developement/WavefieldSeparation-Synthetic.ipynb>`_
| 9,595 | 28.9875 | 125 | py |
pylops | pylops-master/tutorials/ctscan.py | r"""
16. CT Scan Imaging
===================
This tutorial considers a very well-known inverse problem from the field of
medical imaging.
We will be using the :func:`pylops.signalprocessing.Radon2D` operator
to model a *sinogram*, which is a graphic representation of the raw data
obtained from a CT scan. The sinogram is further inverted using both a L2
solver and a TV-regularized solver like Split-Bregman.
"""
import matplotlib.pyplot as plt

# sphinx_gallery_thumbnail_number = 2
import numpy as np
from numba import jit

import pylops

plt.close("all")
np.random.seed(10)  # fixed seed for reproducibility

###############################################################################
# Let's start by loading the Shepp-Logan phantom model. We can then construct
# the sinogram by providing a custom-made function to the
# :func:`pylops.signalprocessing.Radon2D` that samples parametric curves of
# such a type:
#
# .. math::
#    t(r,\theta; x) = \tan(90°-\theta)x + \frac{r}{\sin(\theta)}
#
# where :math:`\theta` is the angle between the x-axis (:math:`x`) and
# the perpendicular to the summation line and :math:`r` is the distance
# from the origin of the summation line.
@jit(nopython=True)
def radoncurve(x, r, theta):
    """Parametric Radon summation curve t(r, theta; x), in samples.

    Uses the module-level image height ``ny`` (defined below) to centre the
    curve; the 1e-15 guard avoids division by zero at theta = 0.
    """
    # Line offset along the detector axis, measured from the image centre.
    offset = (r - ny // 2) / (np.sin(theta) + 1e-15)
    # Slope of the summation line: tan of the complement of theta.
    slope = np.tan(np.pi / 2.0 - theta)
    return offset + slope * x + ny // 2
# phantom normalized to [0, 1]
x = np.load("../testdata/optimization/shepp_logan_phantom.npy").T
x = x / x.max()
nx, ny = x.shape

ntheta = 151
theta = np.linspace(0.0, np.pi, ntheta, endpoint=False)

RLop = pylops.signalprocessing.Radon2D(
    np.arange(ny),
    np.arange(nx),
    theta,
    kind=radoncurve,
    centeredh=True,
    interp=False,
    engine="numba",
    dtype="float64",
)

# sinogram: adjoint of the spreading operator performs the summation
y = RLop.H * x

###############################################################################
# We can now first perform the adjoint, which in the medical imaging literature
# is also referred to as back-projection.
#
# This is the first step of a common reconstruction technique, named filtered
# back-projection, which simply applies a correction filter in the
# frequency domain to the adjoint model.
xrec = RLop * y

fig, axs = plt.subplots(1, 3, figsize=(10, 4))
axs[0].imshow(x.T, vmin=0, vmax=1, cmap="gray")
axs[0].set_title("Model")
axs[0].axis("tight")
axs[1].imshow(y.T, cmap="gray")
axs[1].set_title("Data")
axs[1].axis("tight")
axs[2].imshow(xrec.T, cmap="gray")
axs[2].set_title("Adjoint model")
axs[2].axis("tight")
fig.tight_layout()

###############################################################################
# Finally we take advantage of our different solvers and try to invert the
# modelling operator both in a least-squares sense and using TV-reg.

# first derivatives along both image axes (for TV) and a Laplacian (for L2)
Dop = [
    pylops.FirstDerivative(
        (nx, ny), axis=0, edge=True, kind="backward", dtype=np.float64
    ),
    pylops.FirstDerivative(
        (nx, ny), axis=1, edge=True, kind="backward", dtype=np.float64
    ),
]
D2op = pylops.Laplacian(dims=(nx, ny), edge=True, dtype=np.float64)

# L2
xinv_sm = pylops.optimization.leastsquares.regularized_inversion(
    RLop.H, y.ravel(), [D2op], epsRs=[1e1], **dict(iter_lim=20)
)[0]
xinv_sm = np.real(xinv_sm.reshape(nx, ny))

# TV (Split-Bregman)
mu = 1.5
lamda = [1.0, 1.0]
niter = 3
niterinner = 4

xinv = pylops.optimization.sparsity.splitbregman(
    RLop.H,
    y.ravel(),
    Dop,
    niter_outer=niter,
    niter_inner=niterinner,
    mu=mu,
    epsRL1s=lamda,
    tol=1e-4,
    tau=1.0,
    show=False,
    **dict(iter_lim=20, damp=1e-2)
)[0]
xinv = np.real(xinv.reshape(nx, ny))

fig, axs = plt.subplots(1, 3, figsize=(10, 4))
axs[0].imshow(x.T, vmin=0, vmax=1, cmap="gray")
axs[0].set_title("Model")
axs[0].axis("tight")
axs[1].imshow(xinv_sm.T, vmin=0, vmax=1, cmap="gray")
axs[1].set_title("L2 Inversion")
axs[1].axis("tight")
axs[2].imshow(xinv.T, vmin=0, vmax=1, cmap="gray")
axs[2].set_title("TV-Reg Inversion")
axs[2].axis("tight")
fig.tight_layout()
| 3,873 | 26.671429 | 79 | py |
pylops | pylops-master/tutorials/prestack.py | r"""
08. Pre-stack (AVO) inversion
=============================
Pre-stack inversion represents one step beyond post-stack inversion in that
not only the profile of acoustic impedance can be inferred from seismic data,
rather a set of elastic parameters is estimated from pre-stack data
(i.e., angle gathers) using the information contained in the so-called
AVO (amplitude versus offset) response. Such elastic parameters represent
vital information for more sophisticated geophysical subsurface
characterization than it would be possible to achieve working with
post-stack seismic data.
In this tutorial, the :py:class:`pylops.avo.prestack.PrestackLinearModelling`
operator is used for modelling of both 1d and 2d synthetic pre-stack seismic
data using 1d profiles or 2d models of different subsurface elastic parameters
(P-wave velocity, S-wave velocity, and density) as input.
.. math::
d(t, \theta) = w(t) * \sum_{i=1}^N G_i(t, \theta) \frac{\mathrm{d}\ln m_i(t)}{\mathrm{d}t}
where :math:`\mathbf{m}(t)=[V_P(t), V_S(t), \rho(t)]` is a vector containing
three elastic parameters at time :math:`t`, :math:`G_i(t, \theta)` are the
coefficients of the AVO parametrization used to model pre-stack data
and :math:`w(t)` is the time domain seismic wavelet.
In compact form:
.. math::
\mathbf{d}= \mathbf{W} \mathbf{G} \mathbf{D} \mathbf{m}
where :math:`\mathbf{W}` is a convolution operator, :math:`\mathbf{G}` is
the AVO modelling operator, :math:`\mathbf{D}` is a block-diagonal
derivative operator, and :math:`\mathbf{m}` is the input model.
Subsequently the elastic parameters are estimated via the
:py:class:`pylops.avo.prestack.PrestackInversion` module.
Once again, a two-steps inversion strategy can also be used to deal
with the case of noisy data.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import filtfilt

import pylops
from pylops.utils.wavelets import ricker

plt.close("all")
np.random.seed(0)  # fixed seed: synthetic profiles and noise are reproducible

###############################################################################
# Let's start with a 1d example. A synthetic profile of acoustic impedance
# is created and data is modelled using both the dense and linear operator
# version of :py:class:`pylops.avo.prestack.PrestackLinearModelling` operator

# sphinx_gallery_thumbnail_number = 5

# model: Vp/Vs/rho trends with a step at sample 131; m holds their logs
nt0 = 301
dt0 = 0.004
t0 = np.arange(nt0) * dt0
vp = 1200 + np.arange(nt0) + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 80, nt0))
vs = 600 + vp / 2 + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 20, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 30, nt0))
vp[131:] += 500
vs[131:] += 200
rho[131:] += 100
vsvp = 0.5
m = np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)

# background model
nsmooth = 50
mback = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m, axis=0)

# angles
ntheta = 21
thetamin, thetamax = 0, 40
theta = np.linspace(thetamin, thetamax, ntheta)

# wavelet
ntwav = 41
wav = ricker(t0[: ntwav // 2 + 1], 15)[0]

# lop
PPop = pylops.avo.prestack.PrestackLinearModelling(
    wav, theta, vsvp=vsvp, nt0=nt0, linearization="akirich"
)

# dense
PPop_dense = pylops.avo.prestack.PrestackLinearModelling(
    wav, theta, vsvp=vsvp, nt0=nt0, linearization="akirich", explicit=True
)

# data lop
dPP = PPop * m.ravel()
dPP = dPP.reshape(nt0, ntheta)

# data dense (note the different model ordering expected by the dense operator)
dPP_dense = PPop_dense * m.T.ravel()
dPP_dense = dPP_dense.reshape(ntheta, nt0).T

# noisy data
dPPn_dense = dPP_dense + np.random.normal(0, 1e-2, dPP_dense.shape)

###############################################################################
# We can now invert our data and retrieve elastic profiles for both noise-free
# and noisy data using :py:class:`pylops.avo.prestack.PrestackInversion`.

# dense
minv_dense, dPP_dense_res = pylops.avo.prestack.PrestackInversion(
    dPP_dense,
    theta,
    wav,
    m0=mback,
    linearization="akirich",
    explicit=True,
    returnres=True,
    **dict(cond=1e-10)
)

# lop
minv, dPP_res = pylops.avo.prestack.PrestackInversion(
    dPP,
    theta,
    wav,
    m0=mback,
    linearization="akirich",
    explicit=False,
    returnres=True,
    **dict(damp=1e-10, iter_lim=2000)
)

# dense noisy
minv_dense_noise, dPPn_dense_res = pylops.avo.prestack.PrestackInversion(
    dPPn_dense,
    theta,
    wav,
    m0=mback,
    linearization="akirich",
    explicit=True,
    returnres=True,
    **dict(cond=1e-1)
)

# lop noisy (with vertical smoothing)
minv_noise, dPPn_res = pylops.avo.prestack.PrestackInversion(
    dPPn_dense,
    theta,
    wav,
    m0=mback,
    linearization="akirich",
    explicit=False,
    epsR=5e-1,
    returnres=True,
    **dict(damp=1e-1, iter_lim=100)
)

###############################################################################
# The data, inverted models and residuals are now displayed.

# data and model
fig, (axd, axdn, axvp, axvs, axrho) = plt.subplots(1, 5, figsize=(8, 5), sharey=True)
axd.imshow(
    dPP_dense,
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-np.abs(dPP_dense).max(),
    vmax=np.abs(dPP_dense).max(),
)
axd.set_title("Data")
axd.axis("tight")
axdn.imshow(
    dPPn_dense,
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-np.abs(dPP_dense).max(),
    vmax=np.abs(dPP_dense).max(),
)
axdn.set_title("Noisy Data")
axdn.axis("tight")
axvp.plot(vp, t0, "k", lw=4, label="True")
axvp.plot(np.exp(mback[:, 0]), t0, "--r", lw=4, label="Back")
axvp.plot(np.exp(minv_dense[:, 0]), t0, "--b", lw=2, label="Inv Dense")
axvp.plot(np.exp(minv[:, 0]), t0, "--m", lw=2, label="Inv Lop")
axvp.plot(np.exp(minv_dense_noise[:, 0]), t0, "--c", lw=2, label="Noisy Dense")
axvp.plot(np.exp(minv_noise[:, 0]), t0, "--g", lw=2, label="Noisy Lop")
axvp.set_title(r"$V_P$")
axvs.plot(vs, t0, "k", lw=4, label="True")
axvs.plot(np.exp(mback[:, 1]), t0, "--r", lw=4, label="Back")
axvs.plot(np.exp(minv_dense[:, 1]), t0, "--b", lw=2, label="Inv Dense")
axvs.plot(np.exp(minv[:, 1]), t0, "--m", lw=2, label="Inv Lop")
axvs.plot(np.exp(minv_dense_noise[:, 1]), t0, "--c", lw=2, label="Noisy Dense")
axvs.plot(np.exp(minv_noise[:, 1]), t0, "--g", lw=2, label="Noisy Lop")
axvs.set_title(r"$V_S$")
axrho.plot(rho, t0, "k", lw=4, label="True")
axrho.plot(np.exp(mback[:, 2]), t0, "--r", lw=4, label="Back")
axrho.plot(np.exp(minv_dense[:, 2]), t0, "--b", lw=2, label="Inv Dense")
axrho.plot(np.exp(minv[:, 2]), t0, "--m", lw=2, label="Inv Lop")
axrho.plot(np.exp(minv_dense_noise[:, 2]), t0, "--c", lw=2, label="Noisy Dense")
axrho.plot(np.exp(minv_noise[:, 2]), t0, "--g", lw=2, label="Noisy Lop")
axrho.set_title(r"$\rho$")
axrho.legend(loc="center left", bbox_to_anchor=(1, 0.5))
axd.axis("tight")
plt.tight_layout()

# residuals
fig, axs = plt.subplots(1, 4, figsize=(8, 5), sharey=True)
fig.suptitle("Residuals", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(
    dPP_dense_res,
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-0.1,
    vmax=0.1,
)
axs[0].set_title("Dense")
axs[0].set_xlabel(r"$\theta$")
axs[0].set_ylabel("t[s]")
axs[0].axis("tight")
axs[1].imshow(
    dPP_res,
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-0.1,
    vmax=0.1,
)
axs[1].set_title("Lop")
axs[1].set_xlabel(r"$\theta$")
axs[1].axis("tight")
axs[2].imshow(
    dPPn_dense_res,
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-0.1,
    vmax=0.1,
)
axs[2].set_title("Noisy Dense")
axs[2].set_xlabel(r"$\theta$")
axs[2].axis("tight")
axs[3].imshow(
    dPPn_res,
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-0.1,
    vmax=0.1,
)
axs[3].set_title("Noisy Lop")
axs[3].set_xlabel(r"$\theta$")
axs[3].axis("tight")
plt.tight_layout()
plt.subplots_adjust(top=0.85)

###############################################################################
# Finally before moving to the 2d example, we consider the case when both PP
# and PS data are available. A joint PP-PS inversion can be easily solved
# as follows.
PSop = pylops.avo.prestack.PrestackLinearModelling(
    2 * wav, theta, vsvp=vsvp, nt0=nt0, linearization="ps"
)
PPPSop = pylops.VStack((PPop, PSop))

# data
dPPPS = PPPSop * m.ravel()
dPPPS = dPPPS.reshape(2, nt0, ntheta)
dPPPSn = dPPPS + np.random.normal(0, 1e-2, dPPPS.shape)

# Invert
minvPPSP, dPPPS_res = pylops.avo.prestack.PrestackInversion(
    dPPPS,
    theta,
    [wav, 2 * wav],
    m0=mback,
    linearization=["fatti", "ps"],
    epsR=5e-1,
    returnres=True,
    **dict(damp=1e-1, iter_lim=100)
)

# Data and model
fig, (axd, axdn, axvp, axvs, axrho) = plt.subplots(1, 5, figsize=(8, 5), sharey=True)
axd.imshow(
    dPPPSn[0],
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-np.abs(dPPPSn[0]).max(),
    vmax=np.abs(dPPPSn[0]).max(),
)
axd.set_title("PP Data")
axd.axis("tight")
axdn.imshow(
    dPPPSn[1],
    cmap="gray",
    extent=(theta[0], theta[-1], t0[-1], t0[0]),
    vmin=-np.abs(dPPPSn[1]).max(),
    vmax=np.abs(dPPPSn[1]).max(),
)
axdn.set_title("PS Data")
axdn.axis("tight")
axvp.plot(vp, t0, "k", lw=4, label="True")
axvp.plot(np.exp(mback[:, 0]), t0, "--r", lw=4, label="Back")
axvp.plot(np.exp(minv_noise[:, 0]), t0, "--g", lw=2, label="PP")
axvp.plot(np.exp(minvPPSP[:, 0]), t0, "--b", lw=2, label="PP+PS")
axvp.set_title(r"$V_P$")
axvs.plot(vs, t0, "k", lw=4, label="True")
axvs.plot(np.exp(mback[:, 1]), t0, "--r", lw=4, label="Back")
axvs.plot(np.exp(minv_noise[:, 1]), t0, "--g", lw=2, label="PP")
axvs.plot(np.exp(minvPPSP[:, 1]), t0, "--b", lw=2, label="PP+PS")
axvs.set_title(r"$V_S$")
axrho.plot(rho, t0, "k", lw=4, label="True")
axrho.plot(np.exp(mback[:, 2]), t0, "--r", lw=4, label="Back")
axrho.plot(np.exp(minv_noise[:, 2]), t0, "--g", lw=2, label="PP")
axrho.plot(np.exp(minvPPSP[:, 2]), t0, "--b", lw=2, label="PP+PS")
axrho.set_title(r"$\rho$")
axrho.legend(loc="center left", bbox_to_anchor=(1, 0.5))
axd.axis("tight")
plt.tight_layout()

###############################################################################
# We move now to a 2d example. First of all the model is loaded and
# data generated.

# model: derive pseudo Vp/Vs/rho sections from a single impedance model
inputfile = "../testdata/avo/poststack_model.npz"
model = np.load(inputfile)
x, z = model["x"][::6] / 1000.0, model["z"][:300] / 1000.0
nx, nz = len(x), len(z)
m = 1000 * model["model"][:300, ::6]
mvp = m.copy()
mvs = m / 2
mrho = m / 3 + 300
m = np.log(np.stack((mvp, mvs, mrho), axis=1))

# smooth model
nsmoothz, nsmoothx = 30, 25
mback = filtfilt(np.ones(nsmoothz) / float(nsmoothz), 1, m, axis=0)
mback = filtfilt(np.ones(nsmoothx) / float(nsmoothx), 1, mback, axis=2)

# dense operator
PPop_dense = pylops.avo.prestack.PrestackLinearModelling(
    wav,
    theta,
    vsvp=vsvp,
    nt0=nz,
    spatdims=(nx,),
    linearization="akirich",
    explicit=True,
)

# lop operator
PPop = pylops.avo.prestack.PrestackLinearModelling(
    wav, theta, vsvp=vsvp, nt0=nz, spatdims=(nx,), linearization="akirich"
)

# data
dPP = PPop_dense * m.swapaxes(0, 1).ravel()
dPP = dPP.reshape(ntheta, nz, nx).swapaxes(0, 1)
dPPn = dPP + np.random.normal(0, 5e-2, dPP.shape)

###############################################################################
# Finally we perform the same 4 different inversions as in the post-stack
# tutorial (see :ref:`sphx_glr_tutorials_poststack.py` for more details).

# dense inversion with noise-free data
minv_dense = pylops.avo.prestack.PrestackInversion(
    dPP, theta, wav, m0=mback, explicit=True, simultaneous=False
)

# dense inversion with noisy data
minv_dense_noisy = pylops.avo.prestack.PrestackInversion(
    dPPn, theta, wav, m0=mback, explicit=True, epsI=4e-2, simultaneous=False
)

# spatially regularized lop inversion with noisy data
minv_lop_reg = pylops.avo.prestack.PrestackInversion(
    dPPn,
    theta,
    wav,
    m0=minv_dense_noisy,
    explicit=False,
    epsR=1e1,
    **dict(damp=np.sqrt(1e-4), iter_lim=20)
)

# blockiness promoting inversion with noisy data
minv_blocky = pylops.avo.prestack.PrestackInversion(
    dPPn,
    theta,
    wav,
    m0=mback,
    explicit=False,
    epsR=0.4,
    epsRL1=0.1,
    **dict(mu=0.1, niter_outer=3, niter_inner=3, iter_lim=5, damp=1e-3)
)

###############################################################################
# Let's now visualize the inverted elastic parameters for the different
# scenarios
def plotmodel(
    axs,
    m,
    x,
    z,
    vmin,
    vmax,
    params=("VP", "VS", "Rho"),
    cmap="gist_rainbow",
    title=None,
):
    """Quick visualization of model"""
    # One image panel per elastic parameter, all sharing the same physical
    # extent and color scale.
    extent = (x[0], x[-1], z[-1], z[0])
    for ipar, pname in enumerate(params):
        panel = axs[ipar]
        panel.imshow(m[:, ipar], extent=extent, vmin=vmin, vmax=vmax, cmap=cmap)
        panel.set_title("%s - %s" % (pname, title))
        panel.axis("tight")
    # Hide redundant depth tick labels on the second and third panels.
    for panel in (axs[1], axs[2]):
        plt.setp(panel.get_yticklabels(), visible=False)
# data: stack section (theta=0) on top, three gathers below at the lateral
# positions marked by the white dashed lines
fig = plt.figure(figsize=(8, 9))
ax1 = plt.subplot2grid((2, 3), (0, 0), colspan=3)
ax2 = plt.subplot2grid((2, 3), (1, 0))
ax3 = plt.subplot2grid((2, 3), (1, 1), sharey=ax2)
ax4 = plt.subplot2grid((2, 3), (1, 2), sharey=ax2)
ax1.imshow(
    dPP[:, 0], cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-0.4, vmax=0.4
)
# mark the three gather locations on the stack
ax1.vlines(
    [x[nx // 5], x[nx // 2], x[4 * nx // 5]],
    ymin=z[0],
    ymax=z[-1],
    colors="w",
    linestyles="--",
)
ax1.set_xlabel("x [km]")
ax1.set_ylabel("z [km]")
ax1.set_title(r"Stack ($\theta$=0)")
ax1.axis("tight")
ax2.imshow(
    dPP[:, :, nx // 5],
    cmap="gray",
    extent=(theta[0], theta[-1], z[-1], z[0]),
    vmin=-0.4,
    vmax=0.4,
)
ax2.set_xlabel(r"$\theta$")
ax2.set_ylabel("z [km]")
ax2.set_title(r"Gather (x=%.2f)" % x[nx // 5])
ax2.axis("tight")
ax3.imshow(
    dPP[:, :, nx // 2],
    cmap="gray",
    extent=(theta[0], theta[-1], z[-1], z[0]),
    vmin=-0.4,
    vmax=0.4,
)
ax3.set_xlabel(r"$\theta$")
ax3.set_title(r"Gather (x=%.2f)" % x[nx // 2])
ax3.axis("tight")
ax4.imshow(
    dPP[:, :, 4 * nx // 5],
    cmap="gray",
    extent=(theta[0], theta[-1], z[-1], z[0]),
    vmin=-0.4,
    vmax=0.4,
)
ax4.set_xlabel(r"$\theta$")
ax4.set_title(r"Gather (x=%.2f)" % x[4 * nx // 5])
ax4.axis("tight")
# depth labels only on the leftmost gather (axes share y)
plt.setp(ax3.get_yticklabels(), visible=False)
plt.setp(ax4.get_yticklabels(), visible=False)
# noisy data: same layout as the noise-free figure above, for dPPn
gather_locs = [nx // 5, nx // 2, 4 * nx // 5]
fig = plt.figure(figsize=(8, 9))
ax1 = plt.subplot2grid((2, 3), (0, 0), colspan=3)
ax2 = plt.subplot2grid((2, 3), (1, 0))
ax3 = plt.subplot2grid((2, 3), (1, 1), sharey=ax2)
ax4 = plt.subplot2grid((2, 3), (1, 2), sharey=ax2)
ax1.imshow(
    dPPn[:, 0], cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-0.4, vmax=0.4
)
# mark the three gather locations on the stack
ax1.vlines(
    [x[i] for i in gather_locs],
    ymin=z[0],
    ymax=z[-1],
    colors="w",
    linestyles="--",
)
ax1.set_xlabel("x [km]")
ax1.set_ylabel("z [km]")
ax1.set_title(r"Noisy Stack ($\theta$=0)")
ax1.axis("tight")
# one gather panel per marked lateral position
for gax, ix in zip((ax2, ax3, ax4), gather_locs):
    gax.imshow(
        dPPn[:, :, ix],
        cmap="gray",
        extent=(theta[0], theta[-1], z[-1], z[0]),
        vmin=-0.4,
        vmax=0.4,
    )
    gax.set_xlabel(r"$\theta$")
    gax.set_title(r"Gather (x=%.2f)" % x[ix])
    gax.axis("tight")
ax2.set_ylabel("z [km]")
# depth labels only on the leftmost gather (axes share y)
plt.setp(ax3.get_yticklabels(), visible=False)
plt.setp(ax4.get_yticklabels(), visible=False)
# inverted models: one row per scenario (true, background, and the four
# inversion results), three columns for VP/VS/Rho
fig, axs = plt.subplots(6, 3, figsize=(8, 19))
fig.suptitle("Model", fontsize=12, fontweight="bold", y=0.95)
plotmodel(axs[0], m, x, z, m.min(), m.max(), title="True")
plotmodel(axs[1], mback, x, z, m.min(), m.max(), title="Back")
plotmodel(axs[2], minv_dense, x, z, m.min(), m.max(), title="Dense")
plotmodel(axs[3], minv_dense_noisy, x, z, m.min(), m.max(), title="Dense noisy")
plotmodel(axs[4], minv_lop_reg, x, z, m.min(), m.max(), title="Lop regularized")
plotmodel(axs[5], minv_blocky, x, z, m.min(), m.max(), title="Lop blocky")
plt.tight_layout()
plt.subplots_adjust(top=0.92)
# vertical profiles at the central trace for all scenarios
fig, axs = plt.subplots(1, 3, figsize=(8, 7))
for ip, param in enumerate(["VP", "VS", "Rho"]):
    axs[ip].plot(m[:, ip, nx // 2], z, "k", lw=4, label="True")
    axs[ip].plot(mback[:, ip, nx // 2], z, "--r", lw=4, label="Back")
    axs[ip].plot(minv_dense[:, ip, nx // 2], z, "--b", lw=2, label="Inv Dense")
    axs[ip].plot(
        minv_dense_noisy[:, ip, nx // 2], z, "--m", lw=2, label="Inv Dense noisy"
    )
    axs[ip].plot(
        minv_lop_reg[:, ip, nx // 2], z, "--g", lw=2, label="Inv Lop regularized"
    )
    axs[ip].plot(minv_blocky[:, ip, nx // 2], z, "--y", lw=2, label="Inv Lop blocky")
    axs[ip].set_title(param)
    axs[ip].invert_yaxis()
axs[2].legend(loc=8, fontsize="small")
plt.tight_layout()
###############################################################################
# While the background model ``m0`` has been provided in all the examples so
# far, it is worth showing that the module
# :py:class:`pylops.avo.prestack.PrestackInversion` can also produce so-called
# relative elastic parameters (i.e., variations from an average medium
# property) when the background model ``m0`` is not available.
# Invert without a background model (m0=None): the result is the relative
# (band-limited) elastic parameters rather than absolute values.
dminv = pylops.avo.prestack.PrestackInversion(
    dPP, theta, wav, m0=None, explicit=True, simultaneous=False
)
fig, axs = plt.subplots(1, 3, figsize=(8, 3))
plotmodel(axs, dminv, x, z, -dminv.max(), dminv.max(), cmap="seismic", title="relative")
# central-trace profiles of the relative parameters
fig, axs = plt.subplots(1, 3, figsize=(8, 7))
for ip, param in enumerate(["VP", "VS", "Rho"]):
    axs[ip].plot(dminv[:, ip, nx // 2], z, "k", lw=2)
    axs[ip].set_title(param)
    axs[ip].invert_yaxis()
| 17,756 | 29.562823 | 94 | py |
pylops | pylops-master/tutorials/linearoperator.py | """
01. The LinearOperator
======================
This first tutorial is aimed at easing the use of the PyLops
library for both new users and developers.
We will start by looking at how to initialize a linear operator as well as
different ways to apply the forward and adjoint operations. Finally we will
investigate various *special methods*, also called *magic methods*
(i.e., methods with the double underscores at the beginning and the end) that
have been implemented for such a class and will allow summing, subtractring,
chaining, etc. multiple operators in very easy and expressive way.
"""
###############################################################################
# Let's start by defining a simple operator that applies element-wise
# multiplication of the model with a vector ``d`` in forward mode and
# element-wise multiplication of the data with the same vector ``d`` in
# adjoint mode. This operator is present in PyLops under the
# name of :py:class:`pylops.Diagonal` and
# its implementation is discussed in more details in the :ref:`addingoperator`
# page.
import timeit
import matplotlib.pyplot as plt
import numpy as np
import pylops
# Simple diagonal operator used throughout this tutorial
n = 10
d = np.arange(n) + 1.0
x = np.ones(n)
Dop = pylops.Diagonal(d)
###############################################################################
# First of all we apply the operator in the forward mode. This can be done in
# four different ways:
#
# * ``_matvec``: directly applies the method implemented for forward mode
# * ``matvec``: performs some checks before and after applying ``_matvec``
# * ``*``: operator used to map the special method ``__mul__`` which
#   checks whether the input ``x`` is a vector or matrix and applies ``_matvec``
#   or ``_matmul`` accordingly.
# * ``@``: operator used to map the special method ``__matmul__`` which
#   performs like the ``*`` operator
#
# We will time these 4 different executions and see how using ``_matvec``
# (or ``matvec``) will result in the faster computation. It is thus advised to
# use ``*`` (or ``@``) in examples when expressivity has priority but prefer
# ``_matvec`` (or ``matvec``) for efficient implementations.
# setup command (run once per timeit repeat, not included in the timing)
cmd_setup = """\
import numpy as np
import pylops
n = 10
d = np.arange(n) + 1.
x = np.ones(n)
Dop = pylops.Diagonal(d)
DopH = Dop.H
"""
# _matvec
cmd1 = "Dop._matvec(x)"
# matvec
cmd2 = "Dop.matvec(x)"
# @
cmd3 = "Dop@x"
# *
cmd4 = "Dop*x"
# timing: 5 repeats of 500 executions each, converted to milliseconds
t1 = 1.0e3 * np.array(timeit.repeat(cmd1, setup=cmd_setup, number=500, repeat=5))
t2 = 1.0e3 * np.array(timeit.repeat(cmd2, setup=cmd_setup, number=500, repeat=5))
t3 = 1.0e3 * np.array(timeit.repeat(cmd3, setup=cmd_setup, number=500, repeat=5))
t4 = 1.0e3 * np.array(timeit.repeat(cmd4, setup=cmd_setup, number=500, repeat=5))
plt.figure(figsize=(7, 2))
plt.plot(t1, "k", label=" _matvec")
plt.plot(t2, "r", label="matvec")
plt.plot(t3, "g", label="@")
plt.plot(t4, "b", label="*")
plt.axis("tight")
plt.legend()
plt.tight_layout()
###############################################################################
# Similarly we now consider the adjoint mode. This can be done in
# three different ways:
#
# * ``_rmatvec``: directly applies the method implemented for adjoint mode
# * ``rmatvec``: performs some checks before and after applying ``_rmatvec``
# * ``.H*``: first applies the adjoint ``.H`` which creates a new
# `pylops.linearoperator._AdjointLinearOperator`` where ``_matvec``
# and ``_rmatvec`` are swapped and then applies the new ``_matvec``.
#
# Once again, after timing these 3 different executions we can
# see how using ``_rmatvec`` (or ``rmatvec``) will result in the faster
# computation while ``.H*`` is very inefficient and slow. Note that if the
# adjoint has to be applied multiple times it is at least advised to create
# the adjoint operator by applying ``.H`` only once upfront.
# Not surprisingly, the linear solvers in scipy and PyLops
# actually use ``matvec`` and ``rmatvec`` when dealing with linear operators.
# _rmatvec
cmd1 = "Dop._rmatvec(x)"
# rmatvec
cmd2 = "Dop.rmatvec(x)"
# .H* (pre-computed H, created once in cmd_setup)
cmd3 = "DopH*x"
# .H* (adjoint operator re-created on every call)
cmd4 = "Dop.H*x"
# timing: 5 repeats of 500 executions each, converted to milliseconds
t1 = 1.0e3 * np.array(timeit.repeat(cmd1, setup=cmd_setup, number=500, repeat=5))
t2 = 1.0e3 * np.array(timeit.repeat(cmd2, setup=cmd_setup, number=500, repeat=5))
t3 = 1.0e3 * np.array(timeit.repeat(cmd3, setup=cmd_setup, number=500, repeat=5))
t4 = 1.0e3 * np.array(timeit.repeat(cmd4, setup=cmd_setup, number=500, repeat=5))
plt.figure(figsize=(7, 2))
plt.plot(t1, "k", label=" _rmatvec")
plt.plot(t2, "r", label="rmatvec")
plt.plot(t3, "g", label=".H* (pre-computed H)")
plt.plot(t4, "b", label=".H*")
plt.axis("tight")
plt.legend()
plt.tight_layout()
###############################################################################
# Just to reiterate once again, it is advised to call ``matvec``
# and ``rmatvec`` unless PyLops linear operators are used for
# teaching purposes.
#
# We now go through some other *methods* and *special methods* that
# are implemented in :py:class:`pylops.LinearOperator`:
#
# * ``Op1+Op2``: maps the special method ``__add__`` and
# performs summation between two operators and
# returns a :py:class:`pylops.LinearOperator`
# * ``-Op``: maps the special method ``__neg__`` and
# performs negation of an operators and
# returns a :py:class:`pylops.LinearOperator`
# * ``Op1-Op2``: maps the special method ``__sub__`` and
# performs summation between two operators and
# returns a :py:class:`pylops.LinearOperator`
# * ``Op1**N``: maps the special method ``__pow__`` and
# performs exponentiation of an operator and
# returns a :py:class:`pylops.LinearOperator`
# * ``Op/y`` (and ``Op.div(y)``): maps the special method ``__truediv__`` and
# performs inversion of an operator
# * ``Op.eigs()``: estimates the eigenvalues of the operator
# * ``Op.cond()``: estimates the condition number of the operator
# * ``Op.conj()``: create complex conjugate operator
# Demonstrate the algebraic special methods of LinearOperator
Dop = pylops.Diagonal(d)
# + (sum of two operators)
print(Dop + Dop)
# - (negation and subtraction)
print(-Dop)
print(Dop - 0.5 * Dop)
# ** (operator exponentiation)
print(Dop**3)
# * and / (forward application and inversion)
y = Dop * x
print(Dop / y)
# eigs (estimate the largest eigenvalues)
print(Dop.eigs(neigs=3))
# cond (estimate the condition number)
print(Dop.cond())
# conj (complex conjugate operator)
print(Dop.conj())
###############################################################################
# To understand the effect of ``conj`` we need to look into a problem with an
# operator in the complex domain. Let's create again our
# :py:class:`pylops.Diagonal` operator but this time we populate it with
# complex numbers. We will see that the action of the operator and its complex
# conjugate is different even if the model is real.
# Purely imaginary diagonal: the operator and its complex conjugate act
# differently even on a real-valued model
n = 5
d = 1j * (np.arange(n) + 1.0)
x = np.ones(n)
Dop = pylops.Diagonal(d)
print(f"y = Dx = {Dop * x}")
print(f"y = conj(D)x = {Dop.conj() * x}")
###############################################################################
# At this point, the concept of linear operator may sound abstract.
# The convenience method :func:`pylops.LinearOperator.todense` can be used to
# create the equivalent dense matrix of any operator. In this case for example
# we expect to see a diagonal matrix with ``d`` values along the main diagonal
# Materialize the operator as a dense matrix and display its magnitude
# (expected: non-zeros only on the main diagonal)
D = Dop.todense()
plt.figure(figsize=(5, 5))
plt.imshow(np.abs(D))
plt.title("Dense representation of Diagonal operator")
plt.axis("tight")
plt.colorbar()
plt.tight_layout()
###############################################################################
# At this point it is worth reiterating that if two linear operators are
# combined by means of the algebraical operations shown above, the resulting
# operator is still a :py:class:`pylops.LinearOperator` operator. This means
# that we can still apply any of the methods implemented in our class
# like ``*`` or ``/``.
# A combination of operators is itself a LinearOperator: apply it (*),
# invert it (/), and inspect its dense form
Dop1 = Dop - Dop.conj()
y = Dop1 * x
print(f"x = (Dop - conj(Dop))/y = {Dop1 / y}")
D1 = Dop1.todense()
plt.figure(figsize=(5, 5))
plt.imshow(np.abs(D1))
plt.title(r"Dense representation of $|D - D^*|$")
plt.axis("tight")
plt.colorbar()
plt.tight_layout()
###############################################################################
# Finally, another important feature of PyLops linear operators is that we can
# always keep track of how many times the forward and adjoint passes have been
# applied (and reset when needed). This is particularly useful when running a
# third party solver to see how many evaluations of our operator are performed
# inside the solver.
# Forward/adjoint call counters: two matvec calls and one rmatvec call,
# then reset the counters to zero
Dop = pylops.Diagonal(d)
y = Dop.matvec(x)
y = Dop.matvec(x)
y = Dop.rmatvec(y)
print(f"Forward evaluations: {Dop.matvec_count}")
print(f"Adjoint evaluations: {Dop.rmatvec_count}")
# Reset
Dop.reset_count()
print(f"Forward evaluations: {Dop.matvec_count}")
print(f"Adjoint evaluations: {Dop.rmatvec_count}")
###############################################################################
# This first tutorial is completed. You have seen the basic operations that
# can be performed using :py:class:`pylops.LinearOperator` and you
# should now be able to get started combining various PyLops operators and
# solving your own inverse problems.
| 9,016 | 33.680769 | 81 | py |
pylops | pylops-master/tutorials/interpolation.py | r"""
06. 2D Interpolation
====================
In the mathematical field of numerical analysis, interpolation is the problem of constructing new data
points within the range of a discrete set of known data points. In signal and image processing,
the data may be recorded at irregular locations and it is often required to *regularize* the data
into a regular grid.
In this tutorial, an example of 2d interpolation of an image is carried out using a combination
of PyLops operators (:py:class:`pylops.Restriction` and
:py:class:`pylops.Laplacian`) and the :py:mod:`pylops.optimization` module.
Mathematically speaking, if we want to interpolate a signal using the theory of inverse problems,
we can define the following forward problem:
.. math::
\mathbf{y} = \mathbf{R} \mathbf{x}
where the restriction operator :math:`\mathbf{R}` selects :math:`N` elements from
the regularly sampled signal :math:`\mathbf{x}` at random locations.
The input and output signals are:
.. math::
\mathbf{y}= [y_1, y_2,\ldots,y_N]^T, \qquad \mathbf{x}= [x_1, x_2,\ldots,x_M]^T, \qquad
with :math:`M \gg N`.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
# fixed seed so the random decimation pattern is reproducible
np.random.seed(0)
###############################################################################
# To start we import a 2d image and define our restriction operator to irregularly and randomly
# sample the image for 20% of the entire grid
im = np.load("../testdata/python.npy")[:, :, 0]
Nz, Nx = im.shape
N = Nz * Nx
# Subsample signal: keep a random 20% of the flattened grid indices
perc_subsampling = 0.2
Nsub2d = int(np.round(N * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(N))[:Nsub2d])
# Create operators and data: Restriction models the sampling, Laplacian is
# the smoothing regularizer used by the inversions below
Rop = pylops.Restriction(N, iava, dtype="float64")
D2op = pylops.Laplacian((Nz, Nx), weights=(1, 1), dtype="float64")
x = im.ravel()
y = Rop * x
y1 = Rop.mask(x)
###############################################################################
# We will now use two different routines from our optimization toolbox
# to estimate our original image in the regular grid.
# Invert for interpolated signal, normal equations (CG)
xcg_reg_lop = pylops.optimization.leastsquares.normal_equations_inversion(
    Rop, y, [D2op], epsRs=[np.sqrt(0.1)], **dict(maxiter=200)
)[0]
# Invert for interpolated signal, lsqr
xlsqr_reg_lop = pylops.optimization.leastsquares.regularized_inversion(
    Rop,
    y,
    [D2op],
    epsRs=[np.sqrt(0.1)],
    **dict(damp=0, iter_lim=200, show=0),
)[0]
# Reshape estimated images back to the 2d grid
im_sampled = y1.reshape((Nz, Nx))
im_rec_lap_cg = xcg_reg_lop.reshape((Nz, Nx))
im_rec_lap_lsqr = xlsqr_reg_lop.reshape((Nz, Nx))
###############################################################################
# Finally we visualize the original image, the reconstructed images and
# their error
# One 4-panel summary figure per solver: original image, decimated data,
# reconstruction, and reconstruction error.
for solver_label, im_rec in (
    ("normal eqs", im_rec_lap_cg),
    ("regularized eqs", im_rec_lap_lsqr),
):
    fig, axs = plt.subplots(1, 4, figsize=(12, 4))
    fig.suptitle(
        "Data reconstruction - " + solver_label, fontsize=14, fontweight="bold", y=0.95
    )
    axs[0].imshow(im, cmap="viridis", vmin=0, vmax=250)
    axs[0].axis("tight")
    axs[0].set_title("Original")
    axs[1].imshow(im_sampled.data, cmap="viridis", vmin=0, vmax=250)
    axs[1].axis("tight")
    axs[1].set_title("Sampled")
    axs[2].imshow(im_rec, cmap="viridis", vmin=0, vmax=250)
    axs[2].axis("tight")
    axs[2].set_title("2D Regularization")
    axs[3].imshow(im - im_rec, cmap="gray", vmin=-80, vmax=80)
    axs[3].axis("tight")
    axs[3].set_title("2D Regularization Error")
    plt.tight_layout()
    plt.subplots_adjust(top=0.8)
| 4,061 | 32.85 | 102 | py |
pylops | pylops-master/tutorials/deblurring.py | r"""
05. Image deblurring
====================
*Deblurring* is the process of removing blurring effects from images, caused for
example by defocus aberration or motion blur.
In forward mode, such blurring effect is typically modelled as a 2-dimensional
convolution between the so-called *point spread function* and a target
sharp input image, where the sharp input image (which has to be recovered) is
unknown and the point-spread function can be either known or unknown.
In this tutorial, an example of 2d blurring and deblurring will be shown using
the :py:class:`pylops.signalprocessing.Convolve2D` operator assuming knowledge
of the point-spread function.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
###############################################################################
# Let's start by importing a 2d image and defining the blurring operator
# Load and decimate the test image (keep only the first channel)
im = np.load("../testdata/python.npy")[::5, ::5, 0]
Nz, Nx = im.shape
# Blurring gaussian operator: separable 2d kernel built as the outer
# product of a vertical and a horizontal gaussian
nh = [15, 25]
hz = np.exp(-0.1 * np.linspace(-(nh[0] // 2), nh[0] // 2, nh[0]) ** 2)
hx = np.exp(-0.03 * np.linspace(-(nh[1] // 2), nh[1] // 2, nh[1]) ** 2)
hz /= np.trapz(hz)  # normalize the integral to 1
hx /= np.trapz(hx)  # normalize the integral to 1
h = hz[:, np.newaxis] * hx[np.newaxis, :]
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
him = ax.imshow(h)
ax.set_title("Blurring operator")
fig.colorbar(him, ax=ax)
ax.axis("tight")
# 2d convolution operator centered on the kernel peak
Cop = pylops.signalprocessing.Convolve2D(
    (Nz, Nx), h=h, offset=(nh[0] // 2, nh[1] // 2), dtype="float32"
)
###############################################################################
# We first apply the blurring operator to the sharp image. We then
# try to recover the sharp input image by inverting the convolution operator
# from the blurred image. Note that when we perform inversion without any
# regularization, the deblurred image will show some ringing due to the
# instabilities of the inverse process. Using a L1 solver with a DWT
# preconditioner or TV regularization allows to recover sharper contrasts.
# Blur the image, then deblur with four strategies of increasing
# sophistication (plain least-squares, FISTA+DWT, TV, TV+Haar)
imblur = Cop * im
# plain least-squares deblurring (prone to ringing)
imdeblur = pylops.optimization.leastsquares.normal_equations_inversion(
    Cop, imblur.ravel(), None, maxiter=50  # solvers need 1D arrays
)[0]
imdeblur = imdeblur.reshape(Cop.dims)
# sparsifying transform (wavelet) and derivative regularizers
Wop = pylops.signalprocessing.DWT2D((Nz, Nx), wavelet="haar", level=3)
Dop = [
    pylops.FirstDerivative((Nz, Nx), axis=0, edge=False),
    pylops.FirstDerivative((Nz, Nx), axis=1, edge=False),
]
DWop = Dop + [Wop]
# L1 inversion in the wavelet domain, then transform back to image space
imdeblurfista = pylops.optimization.sparsity.fista(
    Cop * Wop.H, imblur.ravel(), eps=1e-1, niter=100
)[0]
imdeblurfista = imdeblurfista.reshape((Cop * Wop.H).dims)
imdeblurfista = Wop.H * imdeblurfista
# total-variation regularized inversion (Split-Bregman)
imdeblurtv = pylops.optimization.sparsity.splitbregman(
    Cop,
    imblur.ravel(),
    Dop,
    niter_outer=10,
    niter_inner=5,
    mu=1.5,
    epsRL1s=[2e0, 2e0],
    tol=1e-4,
    tau=1.0,
    show=False,
    **dict(iter_lim=5, damp=1e-4)
)[0]
imdeblurtv = imdeblurtv.reshape(Cop.dims)
# TV + Haar-wavelet regularized inversion (Split-Bregman)
imdeblurtv1 = pylops.optimization.sparsity.splitbregman(
    Cop,
    imblur.ravel(),
    DWop,
    niter_outer=10,
    niter_inner=5,
    mu=1.5,
    epsRL1s=[1e0, 1e0, 1e0],
    tol=1e-4,
    tau=1.0,
    show=False,
    **dict(iter_lim=5, damp=1e-4)
)[0]
imdeblurtv1 = imdeblurtv1.reshape(Cop.dims)
###############################################################################
# Finally we visualize the original, blurred, and recovered images.
# sphinx_gallery_thumbnail_number = 2
# Summary figure: six image panels (original, blurred, four deblurring
# results) plus horizontal and vertical sections through the image center
fig = plt.figure(figsize=(12, 6))
fig.suptitle("Deblurring", fontsize=14, fontweight="bold", y=0.95)
ax1 = plt.subplot2grid((2, 5), (0, 0))
ax2 = plt.subplot2grid((2, 5), (0, 1))
ax3 = plt.subplot2grid((2, 5), (0, 2))
ax4 = plt.subplot2grid((2, 5), (1, 0))
ax5 = plt.subplot2grid((2, 5), (1, 1))
ax6 = plt.subplot2grid((2, 5), (1, 2))
ax7 = plt.subplot2grid((2, 5), (0, 3), colspan=2)
ax8 = plt.subplot2grid((2, 5), (1, 3), colspan=2)
ax1.imshow(im, cmap="viridis", vmin=0, vmax=250)
ax1.axis("tight")
ax1.set_title("Original")
ax2.imshow(imblur, cmap="viridis", vmin=0, vmax=250)
ax2.axis("tight")
ax2.set_title("Blurred")
ax3.imshow(imdeblur, cmap="viridis", vmin=0, vmax=250)
ax3.axis("tight")
ax3.set_title("Deblurred")
ax4.imshow(imdeblurfista, cmap="viridis", vmin=0, vmax=250)
ax4.axis("tight")
ax4.set_title("FISTA deblurred")
ax5.imshow(imdeblurtv, cmap="viridis", vmin=0, vmax=250)
ax5.axis("tight")
ax5.set_title("TV deblurred")
ax6.imshow(imdeblurtv1, cmap="viridis", vmin=0, vmax=250)
ax6.axis("tight")
ax6.set_title("TV+Haar deblurred")
# central row of every image
ax7.plot(im[Nz // 2], "k")
ax7.plot(imblur[Nz // 2], "--r")
ax7.plot(imdeblur[Nz // 2], "--b")
ax7.plot(imdeblurfista[Nz // 2], "--g")
ax7.plot(imdeblurtv[Nz // 2], "--m")
ax7.plot(imdeblurtv1[Nz // 2], "--y")
ax7.axis("tight")
ax7.set_title("Horizontal section")
# central column of every image (legend shared with the row panel)
ax8.plot(im[:, Nx // 2], "k", label="Original")
ax8.plot(imblur[:, Nx // 2], "--r", label="Blurred")
ax8.plot(imdeblur[:, Nx // 2], "--b", label="Deblurred")
ax8.plot(imdeblurfista[:, Nx // 2], "--g", label="FISTA deblurred")
ax8.plot(imdeblurtv[:, Nx // 2], "--m", label="TV deblurred")
ax8.plot(imdeblurtv1[:, Nx // 2], "--y", label="TV+Haar deblurred")
ax8.axis("tight")
ax8.set_title("Vertical section")
ax8.legend(loc=5, fontsize="small")
plt.tight_layout()
plt.subplots_adjust(top=0.8)
pylops | pylops-master/tutorials/marchenko.py | """
10. Marchenko redatuming by inversion
=====================================
This example shows how to set-up and run the
:py:class:`pylops.waveeqprocessing.Marchenko` inversion using synthetic data.
"""
# sphinx_gallery_thumbnail_number = 5
# pylint: disable=C0103
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import convolve
from pylops.waveeqprocessing import Marchenko
warnings.filterwarnings("ignore")
plt.close("all")
###############################################################################
# Let's start by defining some input parameters and loading the test data
# Input parameters
inputfile = "../testdata/marchenko/input.npz"
vel = 2400.0  # velocity
toff = 0.045  # direct arrival time shift
nsmooth = 10  # time window smoothing
nfmax = 1000  # max frequency for MDC (#samples)
niter = 10  # iterations
inputdata = np.load(inputfile)
# Receivers
r = inputdata["r"]
nr = r.shape[1]
dr = r[0, 1] - r[0, 0]
# Sources
s = inputdata["s"]
ns = s.shape[1]
ds = s[0, 1] - s[0, 0]
# Virtual points
vs = inputdata["vs"]
# Density model
rho = inputdata["rho"]
z, x = inputdata["z"], inputdata["x"]
# Reflection data (R[s, r, t]) and subsurface fields; drop the last 100
# time samples from every field
R = inputdata["R"][:, :, :-100]
R = np.swapaxes(R, 0, 1)  # just because of how the data was saved
Gsub = inputdata["Gsub"][:-100]
G0sub = inputdata["G0sub"][:-100]
wav = inputdata["wav"]
wav_c = np.argmax(wav)  # sample index of the wavelet peak
t = inputdata["t"][:-100]
ot, dt, nt = t[0], t[1] - t[0], len(t)
# Convolve the subsurface Green's functions with the wavelet along time
# and realign on the wavelet peak, trimming back to nt samples
Gsub = np.apply_along_axis(convolve, 0, Gsub, wav, mode="full")
Gsub = Gsub[wav_c:][:nt]
G0sub = np.apply_along_axis(convolve, 0, G0sub, wav, mode="full")
G0sub = G0sub[wav_c:][:nt]
# Geometry overview: density model with sources (*), receivers (v) and the
# virtual point (.)
plt.figure(figsize=(10, 5))
plt.imshow(rho, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]))
plt.scatter(s[0, 5::10], s[1, 5::10], marker="*", s=150, c="r", edgecolors="k")
plt.scatter(r[0, ::10], r[1, ::10], marker="v", s=150, c="b", edgecolors="k")
plt.scatter(vs[0], vs[1], marker=".", s=250, c="m", edgecolors="k")
plt.axis("tight")
plt.xlabel("x [m]")
plt.ylabel("y [m]")
plt.title("Model and Geometry")
plt.xlim(x[0], x[-1])
# Reflection data: first, central, and last shot gathers
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(12, 7))
axs[0].imshow(
    R[0].T, cmap="gray", vmin=-1e-2, vmax=1e-2, extent=(r[0, 0], r[0, -1], t[-1], t[0])
)
axs[0].set_title("R shot=0")
axs[0].set_xlabel(r"$x_R$")
axs[0].set_ylabel(r"$t$")
axs[0].axis("tight")
axs[0].set_ylim(1.5, 0)
axs[1].imshow(
    R[ns // 2].T,
    cmap="gray",
    vmin=-1e-2,
    vmax=1e-2,
    extent=(r[0, 0], r[0, -1], t[-1], t[0]),
)
axs[1].set_title("R shot=%d" % (ns // 2))
axs[1].set_xlabel(r"$x_R$")
axs[1].set_ylabel(r"$t$")
axs[1].axis("tight")
axs[1].set_ylim(1.5, 0)
axs[2].imshow(
    R[-1].T, cmap="gray", vmin=-1e-2, vmax=1e-2, extent=(r[0, 0], r[0, -1], t[-1], t[0])
)
axs[2].set_title("R shot=%d" % ns)
axs[2].set_xlabel(r"$x_R$")
axs[2].axis("tight")
axs[2].set_ylim(1.5, 0)
fig.tight_layout()
# Reference subsurface fields: full (G) and direct (G0) Green's functions
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(8, 6))
axs[0].imshow(
    Gsub, cmap="gray", vmin=-1e6, vmax=1e6, extent=(r[0, 0], r[0, -1], t[-1], t[0])
)
axs[0].set_title("G")
axs[0].set_xlabel(r"$x_R$")
axs[0].set_ylabel(r"$t$")
axs[0].axis("tight")
axs[0].set_ylim(1.5, 0)
axs[1].imshow(
    G0sub, cmap="gray", vmin=-1e6, vmax=1e6, extent=(r[0, 0], r[0, -1], t[-1], t[0])
)
axs[1].set_title("G0")
axs[1].set_xlabel(r"$x_R$")
axs[1].set_ylabel(r"$t$")
axs[1].axis("tight")
axs[1].set_ylim(1.5, 0)
fig.tight_layout()
##############################################################################
# Let's now create an object of the
# :py:class:`pylops.waveeqprocessing.Marchenko` class and apply redatuming
# for a single subsurface point ``vs``.
# direct arrival window: straight-ray traveltime from the virtual source
# to every receiver, used to build the focusing window
trav = np.sqrt((vs[0] - r[0]) ** 2 + (vs[1] - r[1]) ** 2) / vel
MarchenkoWM = Marchenko(
    R, dt=dt, dr=dr, nfmax=nfmax, wav=wav, toff=toff, nsmooth=nsmooth
)
# Solve for focusing functions and up/down-going Green's functions at the
# single virtual point (rtm/greens also return the RTM image and fields)
(
    f1_inv_minus,
    f1_inv_plus,
    p0_minus,
    g_inv_minus,
    g_inv_plus,
) = MarchenkoWM.apply_onepoint(
    trav,
    G0=G0sub.T,
    rtm=True,
    greens=True,
    dottest=True,
    **dict(iter_lim=niter, show=True)
)
# total Green's function = up-going + down-going components
g_inv_tot = g_inv_minus + g_inv_plus
##############################################################################
# We can now compare the result of Marchenko redatuming via LSQR
# with standard redatuming
# Compare the RTM result (p0^-) with the inverted up- and down-going
# Green's functions; time axis is two-sided (extent from t[-1] to -t[-1])
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(12, 7))
axs[0].imshow(
    p0_minus.T,
    cmap="gray",
    vmin=-5e5,
    vmax=5e5,
    extent=(r[0, 0], r[0, -1], t[-1], -t[-1]),
)
axs[0].set_title(r"$p_0^-$")
axs[0].set_xlabel(r"$x_R$")
axs[0].set_ylabel(r"$t$")
axs[0].axis("tight")
axs[0].set_ylim(1.2, 0)
axs[1].imshow(
    g_inv_minus.T,
    cmap="gray",
    vmin=-5e5,
    vmax=5e5,
    extent=(r[0, 0], r[0, -1], t[-1], -t[-1]),
)
axs[1].set_title(r"$g^-$")
axs[1].set_xlabel(r"$x_R$")
axs[1].set_ylabel(r"$t$")
axs[1].axis("tight")
axs[1].set_ylim(1.2, 0)
axs[2].imshow(
    g_inv_plus.T,
    cmap="gray",
    vmin=-5e5,
    vmax=5e5,
    extent=(r[0, 0], r[0, -1], t[-1], -t[-1]),
)
axs[2].set_title(r"$g^+$")
axs[2].set_xlabel(r"$x_R$")
axs[2].set_ylabel(r"$t$")
axs[2].axis("tight")
axs[2].set_ylim(1.2, 0)
fig.tight_layout()
# Compare the true Green's function with the Marchenko estimate, plus an
# overlay of the normalized central traces (true in red, estimate in black).
fig = plt.figure(figsize=(12, 7))
ax1 = plt.subplot2grid((1, 5), (0, 0), colspan=2)
ax2 = plt.subplot2grid((1, 5), (0, 2), colspan=2)
ax3 = plt.subplot2grid((1, 5), (0, 4))
ax1.imshow(
    Gsub, cmap="gray", vmin=-5e5, vmax=5e5, extent=(r[0, 0], r[0, -1], t[-1], t[0])
)
ax1.set_title(r"$G_{true}$")
# BUGFIX: the original code labelled ``axs[0]``/``axs[1]`` here, i.e. the
# axes of the *previous* figure, leaving ax1/ax2 of this figure unlabelled.
ax1.set_xlabel(r"$x_R$")
ax1.set_ylabel(r"$t$")
ax1.axis("tight")
ax1.set_ylim(1.2, 0)
ax2.imshow(
    g_inv_tot.T,
    cmap="gray",
    vmin=-5e5,
    vmax=5e5,
    extent=(r[0, 0], r[0, -1], t[-1], -t[-1]),
)
ax2.set_title(r"$G_{est}$")
ax2.set_xlabel(r"$x_R$")
ax2.set_ylabel(r"$t$")
ax2.axis("tight")
ax2.set_ylim(1.2, 0)
# g_inv_tot is two-sided in time (cf. the extent above), so take its
# causal part starting at index nt-1 to overlay against t.
ax3.plot(Gsub[:, nr // 2] / Gsub.max(), t, "r", lw=5)
ax3.plot(g_inv_tot[nr // 2, nt - 1 :] / g_inv_tot.max(), t, "k", lw=3)
ax3.set_ylim(1.2, 0)
fig.tight_layout()
##############################################################################
# Note that Marchenko redatuming can also be applied simultaneously
# to multiple subsurface points. Use
# :py:func:`pylops.waveeqprocessing.Marchenko.apply_multiplepoints` instead of
# :py:func:`pylops.waveeqprocessing.Marchenko.apply_onepoint`.
| 6,260 | 25.871245 | 88 | py |
pylops | pylops-master/tutorials/dottest.py | """
02. The Dot-Test
================
One of the most important aspect of writing a *Linear operator* is to be able
to verify that the code implemented in *forward mode* and the code implemented
in *adjoint mode* are effectively adjoint to each other. If this is the case,
your Linear operator will successfully pass the so-called **dot-test**.
Refer to the *Notes* section of :py:func:`pylops.utils.dottest`)
for a more detailed description.
In this example, I will show you how to use the dot-test for a variety of
operator when model and data are either real or complex numbers.
"""
import matplotlib.gridspec as pltgs
import matplotlib.pyplot as plt
# pylint: disable=C0103
import numpy as np
import pylops
from pylops.utils import dottest
plt.close("all")
###############################################################################
# Let's start with something very simple. We will make a :py:class:`pylops.MatrixMult`
# operator and verify that its implementation passes the dot-test.
# For this time, we will do this step-by-step, replicating what happens in the
# :py:func:`pylops.utils.dottest` routine.
N, M = 5, 3
Mat = np.arange(N * M).reshape(N, M)
Op = pylops.MatrixMult(Mat)
# random probing vectors in the data (v) and model (u) spaces
v = np.random.randn(N)
u = np.random.randn(M)
dfwd = Op.matvec(u)  # Op * u
dadj = Op.rmatvec(v)  # Op' * v
# for a correct forward/adjoint pair the two inner products must coincide
lhs = np.dot(dfwd, v)  # (Op * u)' * v
rhs = np.dot(u, dadj)  # u' * (Op' * v)
print(f"Dot-test {np.abs((lhs - rhs) / ((lhs + rhs + 1e-15) / 2)):.2e}")
###############################################################################
# And here is a visual intepretation of what a dot-test is
gs = pltgs.GridSpec(1, 9)
fig = plt.figure(figsize=(7, 3))
ax = plt.subplot(gs[0, 0:2])
ax.imshow(Op.A, cmap="rainbow")
ax.set_title(r"$(Op*$", size=20, fontweight="bold")
ax.set_xticks(np.arange(M - 1) + 0.5)
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.axis("tight")
ax = plt.subplot(gs[0, 2])
ax.imshow(u[:, np.newaxis], cmap="rainbow")
ax.set_title(r"$u)^T$", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.axis("tight")
ax = plt.subplot(gs[0, 3])
ax.imshow(v[:, np.newaxis], cmap="rainbow")
ax.set_title(r"$v$", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 4])
ax.text(
0.35,
0.5,
"=",
horizontalalignment="center",
verticalalignment="center",
size=40,
fontweight="bold",
)
ax.axis("off")
ax = plt.subplot(gs[0, 5])
ax.imshow(u[:, np.newaxis].T, cmap="rainbow")
ax.set_title(r"$u^T$", size=20, fontweight="bold")
ax.set_xticks(np.arange(M - 1) + 0.5)
ax.set_yticks([])
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 6:8])
ax.imshow(Op.A.T, cmap="rainbow")
ax.set_title(r"$(Op^T*$", size=20, fontweight="bold")
ax.set_xticks(np.arange(N - 1) + 0.5)
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.axis("tight")
ax = plt.subplot(gs[0, 8])
ax.imshow(v[:, np.newaxis], cmap="rainbow")
ax.set_title(r"$v)$", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
plt.tight_layout()
###############################################################################
# From now on, we can simply use the :py:func:`pylops.utils.dottest` implementation
# of the dot-test and pass the operator we would like to validate,
# its size in the model and data spaces and optionally the tolerance we will be
# accepting for the dot-test to be considered succesfull. Finally we need to
# specify if our data or/and model vectors contain complex numbers using the
# ``complexflag`` parameter. While the dot-test will return ``True`` when
# succesfull and ``False`` otherwise, we can also ask to print its outcome putting the
# ``verb`` parameters to ``True``.
N = 10
diag = np.arange(N)
Dop = pylops.Diagonal(diag)
# real model, real data
_ = dottest(Dop, N, N, rtol=1e-6, complexflag=0, verb=True)
###############################################################################
# We move now to a more complicated operator, the :py:func:`pylops.signalprocessing.FFT`
# operator. We use once again the :py:func:`pylops.utils.dottest` to verify its implementation
# and since we are dealing with a transform that can be applied to both real and complex
# array, we try different combinations using the ``complexflag`` input.
dt, nt = 0.005, 100
nfft = 2**10
FFTop = pylops.signalprocessing.FFT(
    dims=(nt,), nfft=nfft, sampling=dt, dtype=np.complex128
)
# complex data / real model, then complex data / complex model
dottest(FFTop, nfft, nt, complexflag=2, verb=True)
_ = dottest(FFTop, nfft, nt, complexflag=3, verb=True)
| 4,980 | 33.116438 | 94 | py |
pylops | pylops-master/tutorials/classsolvers.py | r"""
03. Solvers (Advanced)
======================
This is a follow up tutorial to the :ref:`sphx_glr_tutorials_solvers.py` tutorial. The same example will be considered,
however we will showcase how to use the class-based version of our solvers (introduced in PyLops v2).
First of all, when shall you use class-based solvers over function-based ones? The answer is simple, every time you feel
you would have like to have more flexibility when using one PyLops function-based solvers.
In fact, a function-based solver in PyLops v2 is nothing more than a thin wrapper over its class-based equivalent,
which generally performs the following steps:
- solver initialization
- ``setup``
- ``run`` (by calling multiple times ``step``)
- ``finalize``
The nice thing about class-based solvers is that i) a user can manually orchestrate these steps and do anything
in between them; ii) a user can create a class-based :py:class:`pylops.optimization.callback.Callbacks` object and
define a set of callbacks that will be run pre and post setup, step and run. One example of how such callbacks can
be handy to track evolving variables in the solver can be found in :ref:`sphx_glr_gallery_plot_linearregr.py`.
In the following we will leverage the very same mechanism to keep track of a number of metrics using the predefined
:class:`pylops.optimization.callback.MetricsCallback` callback. Finally we show how to create a customized callback
that can track the percentage change of the solution and residual. This is of course just an example, we expect
users will find different use cases based on the problem at hand.
"""
import matplotlib.pyplot as plt
# pylint: disable=C0103
import numpy as np
import pylops
plt.close("all")
# fixed seed so the random subsampling below is reproducible
np.random.seed(10)
###############################################################################
# Let's first create the data in the frequency domain. The data is composed
# by the superposition of 3 sinusoids with different frequencies.
# Signal creation in frequency domain
ifreqs = [41, 25, 66]
amps = [1.0, 1.0, 1.0]
N = 200
nfft = 2**11
dt = 0.004
t = np.arange(N) * dt
f = np.fft.rfftfreq(nfft, dt)
FFTop = 10 * pylops.signalprocessing.FFT(N, nfft=nfft, real=True)
# place the three spikes in the (one-sided) spectrum and go back to time
X = np.zeros(nfft // 2 + 1, dtype="complex128")
X[ifreqs] = amps
x = FFTop.H * X
###############################################################################
# We now define the locations at which the signal will be sampled.
# subsampling locations
perc_subsampling = 0.2
Nsub = int(np.round(N * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(N))[:Nsub])
# Create restriction operator
Rop = pylops.Restriction(N, iava, dtype="float64")
y = Rop * x
ymask = Rop.mask(x)
###############################################################################
# Let's now solve the interpolation problem using the
# :py:class:`pylops.optimization.sparsity.ISTA` class-based solver.
cb = pylops.optimization.callback.MetricsCallback(x, FFTop.H)
istasolve = pylops.optimization.sparsity.ISTA(
    Rop * FFTop.H,
    callbacks=[
        cb,
    ],
)
pista, niteri, costi = istasolve.solve(y, niter=1000, eps=0.1, tol=1e-7)
xista = FFTop.H * pista
# one panel per metric tracked by MetricsCallback
fig, axs = plt.subplots(1, 4, figsize=(16, 3))
for i, metric in enumerate(["mae", "mse", "snr", "psnr"]):
    axs[i].plot(cb.metrics[metric], "k", lw=2)
    axs[i].set_title(metric)
plt.tight_layout()
###############################################################################
# Finally, we show how we can also define customized callbacks. What we are
# really interested in here is to store the first residual norm once the setup
# of the solver is over, and repeat the same after each step (using the previous
# estimate to compute the percentage change). And, we do the same for the
# solution norm.
class CallbackISTA(pylops.optimization.callback.Callbacks):
    """Track percentage change of solution and residual across iterations.

    After ``on_run_end`` the attributes ``x_perc`` and ``res_perc`` are
    numpy arrays with the per-step percentage change of the solution and
    of the residual ``Op @ x - y`` (first entry discarded).
    """

    def __init__(self):
        self.res_perc = []
        self.x_perc = []

    @staticmethod
    def _perc_change(new, old):
        # percentage change of `new` with respect to `old`
        return 100 * np.linalg.norm(new - old) / np.linalg.norm(old)

    def on_setup_end(self, solver, x):
        self.x = x
        # residual is undefined until a first estimate exists
        self.rec = solver.Op @ x - solver.y if x is not None else None

    def on_step_end(self, solver, x):
        self.xold, self.x = self.x, x
        self.recold, self.rec = self.rec, solver.Op @ x - solver.y
        if self.xold is not None:
            self.x_perc.append(self._perc_change(self.x, self.xold))
        # bug fix: guard against recold being None (setup received x=None);
        # the original appended unconditionally and raised a TypeError
        if self.recold is not None:
            self.res_perc.append(self._perc_change(self.rec, self.recold))

    def on_run_end(self, solver, x):
        # remove first percentage
        self.x_perc = np.array(self.x_perc[1:])
        self.res_perc = np.array(self.res_perc[1:])
# Run ISTA and FISTA with the custom callback attached to each solver
cb = CallbackISTA()
istasolve = pylops.optimization.sparsity.ISTA(
    Rop * FFTop.H,
    callbacks=[
        cb,
    ],
)
pista, niteri, costi = istasolve.solve(y, niter=1000, eps=0.1, tol=1e-7)
xista = FFTop.H * pista
cbf = CallbackISTA()
fistasolve = pylops.optimization.sparsity.FISTA(
    Rop * FFTop.H,
    callbacks=[
        cbf,
    ],
)
pfista, niterf, costf = fistasolve.solve(y, niter=1000, eps=0.1, tol=1e-7)
xfista = FFTop.H * pfista
# reconstruction in frequency (top) and time (bottom) domains
fig, axs = plt.subplots(2, 1, figsize=(12, 8))
fig.suptitle("Data reconstruction with sparsity", fontsize=14, fontweight="bold", y=0.9)
axs[0].plot(f, np.abs(X), "k", lw=3)
axs[0].plot(f, np.abs(pista), "--r", lw=3)
axs[0].plot(f, np.abs(pfista), "--g", lw=3)
axs[0].set_xlim(0, 30)
axs[0].set_title("Frequency domain")
axs[1].plot(t[iava], y, ".k", ms=20, label="available samples")
axs[1].plot(t, x, "k", lw=3, label="original")
axs[1].plot(t, xista, "--r", lw=3, label="ISTA")
axs[1].plot(t, xfista, "--g", lw=3, label="FISTA")
axs[1].set_title("Time domain")
axs[1].axis("tight")
axs[1].legend()
plt.tight_layout()
plt.subplots_adjust(top=0.8)
# percentage-change histories collected by the callbacks
fig, axs = plt.subplots(2, 1, figsize=(12, 8))
fig.suptitle("Norms history", fontsize=14, fontweight="bold", y=0.9)
axs[0].semilogy(cb.res_perc, "r", lw=3)
axs[0].semilogy(cbf.res_perc, "g", lw=3)
axs[0].set_title("Residual percentage change")
axs[1].semilogy(cb.x_perc, "r", lw=3, label="ISTA")
axs[1].semilogy(cbf.x_perc, "g", lw=3, label="FISTA")
axs[1].set_title("Solution percentage change")
axs[1].legend()
plt.tight_layout()
plt.subplots_adjust(top=0.8)
| 6,320 | 33.353261 | 120 | py |
pylops | pylops-master/tutorials/radonfiltering.py | r"""
11. Radon filtering
===================
In this example we will be taking advantage of the
:py:class:`pylops.signalprocessing.Radon2D` operator to perform filtering of
unwanted events from a seismic data. For those of you not familiar with seismic
data, let's imagine that we have a data composed of a certain number of flat
events and a parabolic event , we are after a transform that allows us to
separate such an event from the others and filter it out.
Those of you with a geophysics background may immediately realize this
is the case of seismic angle (or offset) gathers after migration and those
events with parabolic moveout are generally residual multiples that we would
like to suppress prior to performing further analysis of our data.
The Radon transform is actually a very good transform to perform such a
separation. We can thus devise a simple workflow that takes our data as input,
applies a Radon transform, filters some of the events out and goes back to the
original domain.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
from pylops.utils.wavelets import ricker
plt.close("all")
# fixed seed for reproducibility of the synthetic example
np.random.seed(0)
###############################################################################
# Let's first create a data composed on 3 linear events and a parabolic event.
par = {"ox": 0, "dx": 2, "nx": 121, "ot": 0, "dt": 0.004, "nt": 100, "f0": 30}
# linear events
v = 1500 # m/s
t0 = [0.1, 0.2, 0.3] # s
theta = [0, 0, 0]
amp = [1.0, -2, 0.5]
# parabolic event
tp0 = [0.13] # s
px = [0] # s/m
pxx = [5e-7] # s²/m²
ampp = [0.7]
# create axis
taxis, taxis2, xaxis, yaxis = pylops.utils.seismicevents.makeaxis(par)
# create wavelet
wav = ricker(taxis[:41], f0=par["f0"])[0]
# generate model
y = (
    pylops.utils.seismicevents.linear2d(xaxis, taxis, v, t0, theta, amp, wav)[1]
    + pylops.utils.seismicevents.parabolic2d(xaxis, taxis, tp0, px, pxx, ampp, wav)[1]
)
###############################################################################
# We can now create the :py:class:`pylops.signalprocessing.Radon2D` operator.
# We also apply its adjoint to the data to obtain a representation of those
# 3 linear events overlapping to a parabolic event in the Radon domain.
# Similarly, we feed the operator to a sparse solver like
# :py:class:`pylops.optimization.sparsity.FISTA` to obtain a sparse
# representation of the data in the Radon domain. At this point we try to filter
# out the unwanted event. We can see how this is much easier for the sparse
# transform as each event has a much more compact representation in the Radon
# domain than for the adjoint transform.
# radon operator
npx = 61
pxmax = 5e-4 # s/m
px = np.linspace(-pxmax, pxmax, npx)
Rop = pylops.signalprocessing.Radon2D(
    taxis, xaxis, px, kind="linear", interp="nearest", centeredh=False, dtype="float64"
)
# adjoint Radon transform
xadj = Rop.H * y
# sparse Radon transform
xinv, niter, cost = pylops.optimization.sparsity.fista(
    Rop, y.ravel(), niter=15, eps=1e1
)
xinv = xinv.reshape(Rop.dims)
# filtering: keep only a narrow band of slopes around p=0 (the flat events)
xfilt = np.zeros_like(xadj)
xfilt[npx // 2 - 3 : npx // 2 + 4] = xadj[npx // 2 - 3 : npx // 2 + 4]
yfilt = Rop * xfilt
# filtering on sparse transform
xinvfilt = np.zeros_like(xinv)
xinvfilt[npx // 2 - 3 : npx // 2 + 4] = xinv[npx // 2 - 3 : npx // 2 + 4]
yinvfilt = Rop * xinvfilt
###############################################################################
# Finally we visualize our results.
pclip = 0.7
fig, axs = plt.subplots(1, 5, sharey=True, figsize=(12, 5))
axs[0].imshow(
    y.T,
    cmap="gray",
    vmin=-pclip * np.abs(y).max(),
    vmax=pclip * np.abs(y).max(),
    extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[0].set(xlabel="$x$ [m]", ylabel="$t$ [s]", title="Data")
axs[0].axis("tight")
axs[1].imshow(
    xadj.T,
    cmap="gray",
    vmin=-pclip * np.abs(xadj).max(),
    vmax=pclip * np.abs(xadj).max(),
    extent=(px[0], px[-1], taxis[-1], taxis[0]),
)
# dashed red lines mark the retained slope band
axs[1].axvline(px[npx // 2 - 3], color="r", linestyle="--")
axs[1].axvline(px[npx // 2 + 3], color="r", linestyle="--")
axs[1].set(xlabel="$p$ [s/m]", title="Radon")
axs[1].axis("tight")
axs[2].imshow(
    yfilt.T,
    cmap="gray",
    vmin=-pclip * np.abs(yfilt).max(),
    vmax=pclip * np.abs(yfilt).max(),
    extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[2].set(xlabel="$x$ [m]", title="Filtered data")
axs[2].axis("tight")
axs[3].imshow(
    xinv.T,
    cmap="gray",
    vmin=-pclip * np.abs(xinv).max(),
    vmax=pclip * np.abs(xinv).max(),
    extent=(px[0], px[-1], taxis[-1], taxis[0]),
)
axs[3].axvline(px[npx // 2 - 3], color="r", linestyle="--")
axs[3].axvline(px[npx // 2 + 3], color="r", linestyle="--")
axs[3].set(xlabel="$p$ [s/m]", title="Sparse Radon")
axs[3].axis("tight")
axs[4].imshow(
    yinvfilt.T,
    cmap="gray",
    vmin=-pclip * np.abs(y).max(),
    vmax=pclip * np.abs(y).max(),
    extent=(xaxis[0], xaxis[-1], taxis[-1], taxis[0]),
)
axs[4].set(xlabel="$x$ [m]", title="Sparse filtered data")
axs[4].axis("tight")
plt.tight_layout()
###############################################################################
# As expected, the Radon domain is a suitable domain for this type of filtering
# and the sparse transform improves the ability to filter out parabolic events
# with small curvature.
#
# On the other hand, it is important to note that we have not been able to
# correctly preserve the amplitudes of each event. This is because the sparse
# Radon transform can only identify a sparsest response that explain the data
# within a certain threshold. For this reason a more suitable approach for
# preserving amplitudes could be to apply a parabolic Raodn transform with the
# aim of reconstructing only the unwanted event and apply an adaptive
# subtraction between the input data and the reconstructed unwanted event.
| 5,808 | 33.993976 | 87 | py |
pylops | pylops-master/tutorials/deghosting.py | r"""
13. Deghosting
==============
Single-component seismic data can be decomposed
in their up- and down-going constituents in a model driven fashion.
This task can be achieved by defining an f-k propagator (or ghost model) and
solving an inverse problem as described in
:func:`pylops.waveeqprocessing.Deghosting`.
"""
import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 3
import numpy as np
from scipy.sparse.linalg import lsqr
import pylops
np.random.seed(0)
plt.close("all")
###############################################################################
# Let's start by loading the input dataset and geometry
inputfile = "../testdata/updown/input.npz"
inputdata = np.load(inputfile)
vel_sep = 2400.0 # velocity at separation level
clip = 1e-1 # plotting clip
# Receivers
r = inputdata["r"]
nr = r.shape[1]
dr = r[0, 1] - r[0, 0]
# Sources
s = inputdata["s"]
# Model
rho = inputdata["rho"]
# Axes
t = inputdata["t"]
nt, dt = len(t), t[1] - t[0]
x, z = inputdata["x"], inputdata["z"]
dx, dz = x[1] - x[0], z[1] - z[0]
# Data (normalized pressure recording)
p = inputdata["p"].T
p /= p.max()
# plot density model with receiver (blue) and source (red) geometry
fig = plt.figure(figsize=(9, 4))
ax1 = plt.subplot2grid((1, 5), (0, 0), colspan=4)
ax2 = plt.subplot2grid((1, 5), (0, 4))
ax1.imshow(rho, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]))
ax1.scatter(r[0, ::5], r[1, ::5], marker="v", s=150, c="b", edgecolors="k")
ax1.scatter(s[0], s[1], marker="*", s=250, c="r", edgecolors="k")
ax1.axis("tight")
ax1.set_xlabel("x [m]")
ax1.set_ylabel("y [m]")
ax1.set_title("Model and Geometry")
ax1.set_xlim(x[0], x[-1])
ax1.set_ylim(z[-1], z[0])
ax2.plot(rho[:, len(x) // 2], z, "k", lw=2)
ax2.set_ylim(z[-1], z[0])
ax2.set_yticks([])
plt.tight_layout()
###############################################################################
# To be able to deghost the input dataset, we need to remove its direct
# arrival. In this example we will create a mask based on the analytical
# traveltime of the direct arrival.
direct = np.sqrt(np.sum((s[:, np.newaxis] - r) ** 2, axis=0)) / vel_sep
# Window: zero everything above the (shifted) direct arrival, one below
off = 0.035
direct_off = direct + off
win = np.zeros((nt, nr))
iwin = np.round(direct_off / dt).astype(int)
for i in range(nr):
    win[iwin[i] :, i] = 1
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(8, 7))
axs[0].imshow(
    p.T,
    cmap="gray",
    vmin=-clip * np.abs(p).max(),
    vmax=clip * np.abs(p).max(),
    extent=(r[0, 0], r[0, -1], t[-1], t[0]),
)
axs[0].plot(r[0], direct_off, "r", lw=2)
axs[0].set_title(r"$P$")
axs[0].axis("tight")
axs[1].imshow(
    win * p.T,
    cmap="gray",
    vmin=-clip * np.abs(p).max(),
    vmax=clip * np.abs(p).max(),
    extent=(r[0, 0], r[0, -1], t[-1], t[0]),
)
axs[1].set_title(r"Windowed $P$")
axs[1].axis("tight")
axs[1].set_ylim(1, 0)
plt.tight_layout()
###############################################################################
# We can now perform deghosting
pup, pdown = pylops.waveeqprocessing.Deghosting(
    p.T,
    nt,
    nr,
    dt,
    dr,
    vel_sep,
    r[1, 0] + dz,
    win=win,
    npad=5,
    ntaper=11,
    solver=lsqr,
    dottest=False,
    dtype="complex128",
    **dict(damp=1e-10, iter_lim=60)
)
# input data vs up-going (P^-) and down-going (P^+) estimates
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(12, 7))
axs[0].imshow(
    p.T,
    cmap="gray",
    vmin=-clip * np.abs(p).max(),
    vmax=clip * np.abs(p).max(),
    extent=(r[0, 0], r[0, -1], t[-1], t[0]),
)
axs[0].set_title(r"$P$")
axs[0].axis("tight")
axs[1].imshow(
    pup,
    cmap="gray",
    vmin=-clip * np.abs(p).max(),
    vmax=clip * np.abs(p).max(),
    extent=(r[0, 0], r[0, -1], t[-1], t[0]),
)
axs[1].set_title(r"$P^-$")
axs[1].axis("tight")
axs[2].imshow(
    pdown,
    cmap="gray",
    vmin=-clip * np.abs(p).max(),
    vmax=clip * np.abs(p).max(),
    extent=(r[0, 0], r[0, -1], t[-1], t[0]),
)
axs[2].set_title(r"$P^+$")
axs[2].axis("tight")
axs[2].set_ylim(1, 0)
# trace comparisons at the central receiver
plt.figure(figsize=(14, 3))
plt.plot(t, p[nr // 2], "k", lw=2, label=r"$p$")
plt.plot(t, pup[:, nr // 2], "r", lw=2, label=r"$p^-$")
plt.xlim(0, t[200])
plt.ylim(-0.2, 0.2)
plt.legend()
plt.tight_layout()
plt.figure(figsize=(14, 3))
plt.plot(t, pdown[:, nr // 2], "b", lw=2, label=r"$p^+$")
plt.plot(t, pup[:, nr // 2], "r", lw=2, label=r"$p^-$")
plt.xlim(0, t[200])
plt.ylim(-0.2, 0.2)
plt.legend()
plt.tight_layout()
###############################################################################
# To see more examples head over to the following notebook:
# `notebook1 <https://github.com/mrava87/pylops_notebooks/blob/master/developement/WavefieldSeparation-singlecomponent.ipynb>`_.
| 4,499 | 24.280899 | 128 | py |
pylops | pylops-master/tutorials/solvers.py | r"""
03. Solvers
===========
This tutorial will guide you through the :py:mod:`pylops.optimization`
module and show how to use various solvers that are included in the
PyLops library.
The main idea here is to provide the user of PyLops with very high-level
functionalities to quickly and easily set up and solve complex systems of
linear equations as well as include regularization and/or preconditioning terms
(all of those constructed by means of PyLops linear operators).
To make this tutorial more interesting, we will present a real life problem
and show how the choice of the solver and regularization/preconditioning terms
is vital in many circumstances to successfully retrieve an estimate of
the model. The problem that we are going to consider is generally referred to
as the *data reconstruction* problem and aims at reconstructing a regularly
sampled signal of size :math:`M` from :math:`N` randomly selected samples:
.. math::
\mathbf{y} = \mathbf{R} \mathbf{x}
where the restriction operator :math:`\mathbf{R}` that selects the :math:`M`
elements from :math:`\mathbf{x}` at random locations is implemented using
:py:class:`pylops.Restriction`, and
.. math::
\mathbf{y}= [y_1, y_2,\ldots,y_N]^T, \qquad
\mathbf{x}= [x_1, x_2,\ldots,x_M]^T, \qquad
with :math:`M \gg N`.
"""
import matplotlib.pyplot as plt
# pylint: disable=C0103
import numpy as np
import pylops
plt.close("all")
# fixed seed so the random subsampling below is reproducible
np.random.seed(10)
###############################################################################
# Let's first create the data in the frequency domain. The data is composed
# by the superposition of 3 sinusoids with different frequencies.
# Signal creation in frequency domain
ifreqs = [41, 25, 66]
amps = [1.0, 1.0, 1.0]
N = 200
nfft = 2**11
dt = 0.004
t = np.arange(N) * dt
f = np.fft.rfftfreq(nfft, dt)
FFTop = 10 * pylops.signalprocessing.FFT(N, nfft=nfft, real=True)
# place the three spikes in the (one-sided) spectrum and go back to time
X = np.zeros(nfft // 2 + 1, dtype="complex128")
X[ifreqs] = amps
x = FFTop.H * X
fig, axs = plt.subplots(2, 1, figsize=(12, 8))
axs[0].plot(f, np.abs(X), "k", lw=2)
axs[0].set_xlim(0, 30)
axs[0].set_title("Data(frequency domain)")
axs[1].plot(t, x, "k", lw=2)
axs[1].set_title("Data(time domain)")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# We now define the locations at which the signal will be sampled.
# subsampling locations
perc_subsampling = 0.2
Nsub = int(np.round(N * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(N))[:Nsub])
# Create restriction operator
Rop = pylops.Restriction(N, iava, dtype="float64")
y = Rop * x
ymask = Rop.mask(x)
# Visualize data
fig = plt.figure(figsize=(12, 4))
plt.plot(t, x, "k", lw=3)
plt.plot(t, x, ".k", ms=20, label="all samples")
plt.plot(t, ymask, ".g", ms=15, label="available samples")
plt.legend()
plt.title("Data restriction")
plt.tight_layout()
###############################################################################
# To start let's consider the simplest *'solver'*, i.e., *least-square inversion
# without regularization*. We aim here to minimize the following cost function:
#
# .. math::
#        J = \|\mathbf{y} - \mathbf{R} \mathbf{x}\|_2^2
#
# Depending on the choice of the operator :math:`\mathbf{R}`, such problem can
# be solved using explicit matrix solvers as well as iterative solvers. In
# this case we will be using the latter approach
# (more specifically the scipy implementation of the *LSQR* solver -
# i.e., :py:func:`scipy.sparse.linalg.lsqr`) as we do not want to explicitly
# create and invert a matrix. In most cases this will be the only viable
# approach as most of the large-scale optimization problems that we are
# interested to solve using PyLops do not lend naturally to the creation and
# inversion of explicit matrices.
#
# This first solver can be very easily implemented using the
# ``/`` for PyLops operators, which will automatically call the
# :py:func:`scipy.sparse.linalg.lsqr` with some default parameters.
xinv = Rop / y
###############################################################################
# We can also use :py:func:`pylops.optimization.leastsquares.regularized_inversion`
# (without regularization term for now) and customize our solvers using
# ``kwargs``.
xinv = pylops.optimization.leastsquares.regularized_inversion(
    Rop, y, [], **dict(damp=0, iter_lim=10, show=True)
)[0]
###############################################################################
# Finally we can select a different starting guess from the null vector
xinv_fromx0 = pylops.optimization.leastsquares.regularized_inversion(
    Rop, y, [], x0=np.ones(N), **dict(damp=0, iter_lim=10, show=True)
)[0]
###############################################################################
# The cost function above can be also expanded in terms of
# its *normal equations*
#
# .. math::
#        \mathbf{x}_{ne}= (\mathbf{R}^T \mathbf{R})^{-1}
#        \mathbf{R}^T \mathbf{y}
#
# The method :py:func:`pylops.optimization.leastsquares.normal_equations_inversion`
# implements such system of equations explicitly and solves them using an
# iterative scheme suitable for square matrices (i.e., :math:`M=N`).
#
# While this approach may seem not very useful, we will soon see how
# regularization terms could be easily added to the normal equations using
# this method.
xne = pylops.optimization.leastsquares.normal_equations_inversion(Rop, y, [])[0]
###############################################################################
# Let's now visualize the different inversion results
fig = plt.figure(figsize=(12, 4))
plt.plot(t, x, "k", lw=2, label="original")
plt.plot(t, xinv, "b", ms=10, label="inversion")
plt.plot(t, xinv_fromx0, "--r", ms=10, label="inversion from x0")
plt.plot(t, xne, "--g", ms=10, label="normal equations")
plt.legend()
plt.title("Data reconstruction without regularization")
plt.tight_layout()
###############################################################################
# Regularization
# ~~~~~~~~~~~~~~
# You may have noticed that none of the inversion has been successfull in
# recovering the original signal. This is a clear indication that
# the problem we are trying to solve is highly ill-posed and requires some
# prior knowledge from the user.
#
# We will now see how to add prior information to the inverse process in the
# form of regularization (or preconditioning). This can be done in two
# different ways
#
# * regularization via :py:func:`pylops.optimization.leastsquares.normal_equations_inversion`
#   or :py:func:`pylops.optimization.leastsquares.regularized_inversion`)
# * preconditioning via :py:func:`pylops.optimization.leastsquares.preconditioned_inversion`
#
# Let's start by regularizing the normal equations using a second
# derivative operator
#
# .. math::
#        \mathbf{x} = (\mathbf{R^TR}+\epsilon_\nabla^2\nabla^T\nabla)^{-1}
#        \mathbf{R^Ty}
# Create regularization operator
D2op = pylops.SecondDerivative(N, dtype="float64")
# Regularized inversion
epsR = np.sqrt(0.1)
epsI = np.sqrt(1e-4)
xne = pylops.optimization.leastsquares.normal_equations_inversion(
    Rop, y, [D2op], epsI=epsI, epsRs=[epsR], **dict(maxiter=50)
)[0]
###############################################################################
# Note that in case we have access to a fast implementation for the chain of
# forward and adjoint for the regularization operator
# (i.e., :math:`\nabla^T\nabla`), we can modify our call to
# :py:func:`pylops.optimization.leastsquares.normal_equations_inversion` as
# follows:
ND2op = pylops.MatrixMult((D2op.H * D2op).tosparse()) # mimic fast D^T D
xne1 = pylops.optimization.leastsquares.normal_equations_inversion(
    Rop, y, [], NRegs=[ND2op], epsI=epsI, epsNRs=[epsR], **dict(maxiter=50)
)[0]
###############################################################################
# We can do the same while using
# :py:func:`pylops.optimization.leastsquares.regularized_inversion`
# which solves the following augmented problem
#
# .. math::
#        \begin{bmatrix}
#            \mathbf{R} \\
#            \epsilon_\nabla \nabla
#        \end{bmatrix} \mathbf{x} =
#        \begin{bmatrix}
#            \mathbf{y} \\
#            0
#        \end{bmatrix}
xreg = pylops.optimization.leastsquares.regularized_inversion(
    Rop,
    y,
    [D2op],
    epsRs=[np.sqrt(0.1)],
    **dict(damp=np.sqrt(1e-4), iter_lim=50, show=0)
)[0]
###############################################################################
# We can also write a preconditioned problem, whose cost function is
#
# .. math::
#        J= \|\mathbf{y} - \mathbf{R} \mathbf{P} \mathbf{p}\|_2^2
#
# where :math:`\mathbf{P}` is the precondioned operator, :math:`\mathbf{p}` is
# the projected model in the preconditioned space, and
# :math:`\mathbf{x}=\mathbf{P}\mathbf{p}` is the model in the original model
# space we want to solve for. Note that a preconditioned problem converges
# much faster to its solution than its corresponding regularized problem.
# This can be done using the routine
# :py:func:`pylops.optimization.leastsquares.preconditioned_inversion`.
# Create regularization operator
Sop = pylops.Smoothing1D(nsmooth=11, dims=[N], dtype="float64")
# Invert for interpolated signal
xprec = pylops.optimization.leastsquares.preconditioned_inversion(
    Rop, y, Sop, **dict(damp=np.sqrt(1e-9), iter_lim=20, show=0)
)[0]
###############################################################################
# Let's finally visualize these solutions
# sphinx_gallery_thumbnail_number=4
fig = plt.figure(figsize=(12, 4))
plt.plot(t[iava], y, ".k", ms=20, label="available samples")
plt.plot(t, x, "k", lw=3, label="original")
plt.plot(t, xne, "b", lw=3, label="normal equations")
plt.plot(t, xne1, "--c", lw=3, label="normal equations (with direct D^T D)")
plt.plot(t, xreg, "-.r", lw=3, label="regularized")
plt.plot(t, xprec, "--g", lw=3, label="preconditioned equations")
plt.legend()
plt.title("Data reconstruction with regularization")
# inset zooming on the first part of the signal
subax = fig.add_axes([0.7, 0.2, 0.15, 0.6])
subax.plot(t[iava], y, ".k", ms=20)
subax.plot(t, x, "k", lw=3)
subax.plot(t, xne, "b", lw=3)
subax.plot(t, xne1, "--c", lw=3)
subax.plot(t, xreg, "-.r", lw=3)
subax.plot(t, xprec, "--g", lw=3)
subax.set_xlim(0.05, 0.3)
plt.tight_layout()
###############################################################################
# Much better estimates! We have seen here how regularization and/or
# preconditioning can be vital to succesfully solve some ill-posed inverse
# problems.
#
# We have however so far only considered solvers that can include additional
# norm-2 regularization terms. A very active area of research is that of
# *sparsity-promoting* solvers (also sometimes referred to as *compressive
# sensing*): the regularization term added to the cost function to minimize has
# norm-p (:math:`p \le 1`) and the problem is generally recasted by considering
# the model to be sparse in some domain. We can follow this philosophy as
# our signal to invert was actually created as superposition of 3 sinusoids
# (i.e., three spikes in the Fourier domain). Our new cost function is:
#
# .. math::
#        J_1 = \|\mathbf{y} - \mathbf{R} \mathbf{F} \mathbf{p}\|_2^2 +
#        \epsilon \|\mathbf{p}\|_1
#
# where :math:`\mathbf{F}` is the FFT operator. We will thus use the
# :py:class:`pylops.optimization.sparsity.ista` and
# :py:class:`pylops.optimization.sparsity.fista` solvers to estimate our input
# signal.
pista, niteri, costi = pylops.optimization.sparsity.ista(
    Rop * FFTop.H,
    y,
    niter=1000,
    eps=0.1,
    tol=1e-7,
)
xista = FFTop.H * pista
pfista, niterf, costf = pylops.optimization.sparsity.fista(
    Rop * FFTop.H,
    y,
    niter=1000,
    eps=0.1,
    tol=1e-7,
)
xfista = FFTop.H * pfista
# reconstruction in frequency (top) and time (bottom) domains
fig, axs = plt.subplots(2, 1, figsize=(12, 8))
fig.suptitle("Data reconstruction with sparsity", fontsize=14, fontweight="bold", y=0.9)
axs[0].plot(f, np.abs(X), "k", lw=3)
axs[0].plot(f, np.abs(pista), "--r", lw=3)
axs[0].plot(f, np.abs(pfista), "--g", lw=3)
axs[0].set_xlim(0, 30)
axs[0].set_title("Frequency domain")
axs[1].plot(t[iava], y, ".k", ms=20, label="available samples")
axs[1].plot(t, x, "k", lw=3, label="original")
axs[1].plot(t, xista, "--r", lw=3, label="ISTA")
axs[1].plot(t, xfista, "--g", lw=3, label="FISTA")
axs[1].set_title("Time domain")
axs[1].axis("tight")
axs[1].legend()
plt.tight_layout()
plt.subplots_adjust(top=0.8)
# convergence histories of the two solvers
fig, ax = plt.subplots(1, 1, figsize=(12, 3))
ax.semilogy(costi, "r", lw=2, label="ISTA")
ax.semilogy(costf, "g", lw=2, label="FISTA")
ax.set_title("Cost functions", size=15, fontweight="bold")
ax.set_xlabel("Iteration")
ax.legend()
ax.grid(True)
plt.tight_layout()
###############################################################################
# As you can see, changing parametrization of the model and imposing sparsity
# in the Fourier domain has given an extra improvement to our ability of
# recovering the underlying densely sampled input signal. Moreover, FISTA
# converges much faster than ISTA as expected and should be preferred when
# using sparse solvers.
#
# Finally we consider a slightly different cost function (note that in this
# case we try to solve a constrained problem):
#
# .. math::
# J_1 = \|\mathbf{p}\|_1
# \quad \text{subject to} \quad \|\mathbf{y} -
# \mathbf{R} \mathbf{F} \mathbf{p}\|
#
# A very popular solver to solve such kind of cost function is called *spgl1*
# and can be accessed via :py:class:`pylops.optimization.sparsity.spgl1`.
# Solve the sparsity-constrained problem with SPGL1 and display the results
xspgl1, pspgl1, info = pylops.optimization.sparsity.spgl1(
    Rop, y, SOp=FFTop, tau=3, iter_lim=200
)
fig, axs = plt.subplots(2, 1, figsize=(12, 8))
fig.suptitle("Data reconstruction with SPGL1", fontsize=14, fontweight="bold", y=0.9)
axs[0].plot(f, np.abs(X), "k", lw=3)
axs[0].plot(f, np.abs(pspgl1), "--m", lw=3)
axs[0].set_xlim(0, 30)
axs[0].set_title("Frequency domain")
axs[1].plot(t[iava], y, ".k", ms=20, label="available samples")
axs[1].plot(t, x, "k", lw=3, label="original")
axs[1].plot(t, xspgl1, "--m", lw=3, label="SPGL1")
axs[1].set_title("Time domain")
axs[1].axis("tight")
axs[1].legend()
plt.tight_layout()
plt.subplots_adjust(top=0.8)
fig, ax = plt.subplots(1, 1, figsize=(12, 3))
# fix: this curve is the SPGL1 residual-norm history, not ISTA
# (label was copy-pasted from the previous cost-function plot)
ax.semilogy(info["rnorm2"], "k", lw=2, label="SPGL1")
ax.set_title("Cost functions", size=15, fontweight="bold")
ax.set_xlabel("Iteration")
ax.legend()
ax.grid(True)
plt.tight_layout()
| 14,441 | 36.317829 | 93 | py |
r"""
Flip along an axis
==================
This example shows how to use the :py:class:`pylops.Flip`
operator to simply flip an input signal along an axis.
"""
import matplotlib.pyplot as plt
import numpy as np

import pylops

plt.close("all")

###############################################################################
# Start with a one-dimensional example: an input signal made of ``nt`` samples.
nt = 10
x = np.arange(nt)

###############################################################################
# Build the flip operator and apply it to the input signal. Applying the
# adjoint to the flipped signal brings back the original, i.e. for this
# operator the adjoint is effectively equivalent to the inverse.
Fop = pylops.Flip(nt)
y = Fop * x
xadj = Fop.H * y

plt.figure(figsize=(3, 5))
plt.plot(x, "k", lw=3, label=r"$x$")
plt.plot(y, "r", lw=3, label=r"$y=Fx$")
plt.plot(xadj, "--g", lw=3, label=r"$x_{adj} = F^H y$")
plt.title("Flip in 1st direction", fontsize=14, fontweight="bold")
plt.legend()
plt.tight_layout()


def _show_triptych(panels, suptitle):
    # Draw the model / data / adjoint triplet side by side in a single figure.
    fig, axs = plt.subplots(1, 3, figsize=(7, 3))
    fig.suptitle(suptitle, fontsize=14, fontweight="bold", y=0.95)
    for ax, (img, ttl) in zip(axs, panels):
        ax.imshow(img, cmap="rainbow")
        ax.set_title(ttl)
        ax.axis("tight")
    plt.tight_layout()
    plt.subplots_adjust(top=0.8)


###############################################################################
# Now repeat the same exercise on a two-dimensional signal: flip the model
# first along the first axis and then along the second axis.
nt, nx = 10, 5
x = np.outer(np.arange(nt), np.ones(nx))
Fop = pylops.Flip((nt, nx), axis=0)
y = Fop * x
xadj = Fop.H * y

_show_triptych(
    [(x, r"$x$"), (y, r"$y = F x$"), (xadj, r"$x_{adj} = F^H y$")],
    "Flip in 1st direction for 2d data",
)

x = np.outer(np.ones(nt), np.arange(nx))
Fop = pylops.Flip(dims=(nt, nx), axis=1)
y = Fop * x
xadj = Fop.H * y

# sphinx_gallery_thumbnail_number = 3
_show_triptych(
    [(x, r"$x$"), (y, r"$y = F x$"), (xadj, r"$x_{adj} = F^H y$")],
    "Flip in 2nd direction for 2d data",
)
| 2,455 | 27.55814 | 79 | py |
"""
Tapers
======
This example shows how to create some basic tapers in 1d, 2d, and 3d
using the :py:mod:`pylops.utils.tapers` module.
"""
import matplotlib.pyplot as plt

import pylops

plt.close("all")

############################################
# Define the time and space axes first
par = {
    "ox": -200,
    "dx": 2,
    "nx": 201,
    "oy": -100,
    "dy": 2,
    "ny": 101,
    "ot": 0,
    "dt": 0.004,
    "nt": 501,
    "ntapx": 21,
    "ntapy": 31,
}

############################################
# Create the 1d tapers and compare them in a single plot
tapers_1d = (
    ("hanning", "r", pylops.utils.tapers.hanningtaper(par["nx"], par["ntapx"])),
    ("cosine", "k", pylops.utils.tapers.cosinetaper(par["nx"], par["ntapx"], False)),
    ("cosine square", "b", pylops.utils.tapers.cosinetaper(par["nx"], par["ntapx"], True)),
)

plt.figure(figsize=(7, 3))
for lbl, color, taper in tapers_1d:
    plt.plot(taper, color, label=lbl)
plt.title("Tapers")
plt.legend()
plt.tight_layout()

############################################
# 2d and 3d tapers can be built with any of the tapers above
tap2d = pylops.utils.tapers.taper2d(par["nt"], par["nx"], par["ntapx"])

plt.figure(figsize=(7, 3))
plt.plot(tap2d[:, par["nt"] // 2], "k", lw=2)
plt.title("Taper")
plt.tight_layout()

tap3d = pylops.utils.tapers.taper3d(
    par["nt"], (par["ny"], par["nx"]), (par["ntapy"], par["ntapx"])
)

plt.figure(figsize=(7, 3))
plt.imshow(tap3d[:, :, par["nt"] // 2], "jet")
plt.title("Taper in y-x slice")
plt.xlabel("x")
plt.ylabel("y")
plt.tight_layout()
| 1,541 | 23.870968 | 73 | py |
r"""
Symmetrize
==========
This example shows how to use the :py:class:`pylops.Symmetrize`
operator which takes an input signal and returns a symmetric signal
by pre-pending the input signal in reversed order. Such an operation can be
inverted as we will see in this example.
Moreover the :py:class:`pylops.Symmetrize` can be used as *preconditioning*
to any inverse problem where we are after inverting for a signal that we
want to ensure is symmetric. Refer to :ref:`sphx_glr_gallery_plot_wavest.py`
for an example of such a type.
"""
import matplotlib.pyplot as plt
import numpy as np

import pylops

plt.close("all")

###############################################################################
# Let's start with a 1D example. Define an input signal composed of
# ``nt`` samples
nt = 10
x = np.arange(nt)

###############################################################################
# We can now create our symmetrize operator and apply it to the input signal.
# We also apply the adjoint and the explicit inverse to the symmetrized
# signal: both recover the original input.
Sop = pylops.Symmetrize(nt)
y = Sop * x
xadj = Sop.H * y
xinv = Sop / y

plt.figure(figsize=(7, 3))
plt.plot(x, "k", lw=3, label=r"$x$")
plt.plot(y, "r", lw=3, label=r"$y=Fx$")
plt.plot(xadj, "--g", lw=3, label=r"$x_{adj} = F^H y$")
plt.plot(xinv, "--m", lw=3, label=r"$x_{inv} = F^{-1} y$")
plt.title("Symmetrize in 1st direction", fontsize=14, fontweight="bold")
plt.legend()
plt.tight_layout()

###############################################################################
# Let's now repeat the same exercise on a two dimensional signal. We will
# first symmetrize the model along the first axis and then along the
# second axis.
nt, nx = 10, 6
x = np.outer(np.arange(nt), np.ones(nx))
Sop = pylops.Symmetrize((nt, nx), axis=0)
y = Sop * x
xadj = Sop.H * y
xinv = Sop / y.ravel()
xinv = xinv.reshape(Sop.dims)

fig, axs = plt.subplots(1, 3, figsize=(7, 3))
# This figure symmetrizes along axis=0, so the title refers to the 1st
# direction (the original mislabeled it as the 2nd direction).
fig.suptitle(
    "Symmetrize in 1st direction for 2d data", fontsize=14, fontweight="bold", y=0.95
)
axs[0].imshow(x, cmap="rainbow", vmin=0, vmax=9)
axs[0].set_title(r"$x$")
axs[0].axis("tight")
axs[1].imshow(y, cmap="rainbow", vmin=0, vmax=9)
axs[1].set_title(r"$y=Fx$")
axs[1].axis("tight")
# The third panel shows the inverse ``xinv`` (not the adjoint), hence the
# corrected x_{inv} label to match the F^{-1} formula.
axs[2].imshow(xinv, cmap="rainbow", vmin=0, vmax=9)
axs[2].set_title(r"$x_{inv}=F^{-1}y$")
axs[2].axis("tight")
plt.tight_layout()
plt.subplots_adjust(top=0.8)

x = np.outer(np.ones(nt), np.arange(nx))
Sop = pylops.Symmetrize((nt, nx), axis=1)
y = Sop * x
xadj = Sop.H * y
xinv = Sop / y.ravel()
xinv = xinv.reshape(Sop.dims)

# sphinx_gallery_thumbnail_number = 3
fig, axs = plt.subplots(1, 3, figsize=(7, 3))
fig.suptitle(
    "Symmetrize in 2nd direction for 2d data", fontsize=14, fontweight="bold", y=0.95
)
axs[0].imshow(x, cmap="rainbow", vmin=0, vmax=9)
axs[0].set_title(r"$x$")
axs[0].axis("tight")
axs[1].imshow(y, cmap="rainbow", vmin=0, vmax=9)
axs[1].set_title(r"$y=Fx$")
axs[1].axis("tight")
axs[2].imshow(xinv, cmap="rainbow", vmin=0, vmax=9)
axs[2].set_title(r"$x_{inv}=F^{-1}y$")
axs[2].axis("tight")
plt.tight_layout()
plt.subplots_adjust(top=0.8)
| 3,135 | 30.36 | 85 | py |
r"""
1D Smoothing
============
This example shows how to use the :py:class:`pylops.Smoothing1D` operator
to smooth an input signal along a given axis.
Derivative (or roughening) operators are generally used as *regularization*
in inverse problems. Smoothing has the opposite effect of roughening and
it can be employed as *preconditioning* in inverse problems.
A smoothing operator is a simple compact filter of length :math:`n_{smooth}`
and each element is equal to :math:`1/n_{smooth}`.
"""
import matplotlib.pyplot as plt
import numpy as np

import pylops

plt.close("all")

###############################################################################
# Define the input parameters: number of samples of the input signal (``N``)
# and length of the smoothing filter (:math:`n_{smooth}`).
# In this first case the input signal is one at the center and zero elsewhere.
N = 31
nsmooth = 7
x = np.zeros(N)
x[int(N / 2)] = 1

Sop = pylops.Smoothing1D(nsmooth=nsmooth, dims=[N], dtype="float32")
y = Sop * x
xadj = Sop.H * y

fig, ax = plt.subplots(1, 1, figsize=(10, 3))
ax.plot(x, "k", lw=2, label=r"$x$")
ax.plot(y, "r", lw=2, label=r"$y=Ax$")
ax.set_title("Smoothing in 1st direction", fontsize=14, fontweight="bold")
ax.legend()
plt.tight_layout()

###############################################################################
# Let's repeat the same exercise with a random signal as input. After applying
# smoothing, we will also try to invert it.
N = 120
nsmooth = 13
x = np.random.normal(0, 1, N)
# Use the ``nsmooth`` variable defined above (the original hard-coded 13
# again) and pass ``dims`` as a sequence, consistent with the first call.
Sop = pylops.Smoothing1D(nsmooth=nsmooth, dims=[N], dtype="float32")
y = Sop * x
xest = Sop / y

fig, ax = plt.subplots(1, 1, figsize=(10, 3))
ax.plot(x, "k", lw=2, label=r"$x$")
ax.plot(y, "r", lw=2, label=r"$y=Ax$")
# Label fixed: the green curve is the estimate ``xest`` (x_{est}, not x_{ext}).
ax.plot(xest, "--g", lw=2, label=r"$x_{est}$")
ax.set_title("Smoothing in 1st direction", fontsize=14, fontweight="bold")
ax.legend()
plt.tight_layout()

###############################################################################
# Finally we show that the same operator can be applied to multi-dimensional
# data along a chosen axis.
A = np.zeros((11, 21))
A[5, 10] = 1
Sop = pylops.Smoothing1D(nsmooth=5, dims=(11, 21), axis=0, dtype="float64")
B = Sop * A

fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle(
    "Smoothing in 1st direction for 2d data", fontsize=14, fontweight="bold", y=0.95
)
im = axs[0].imshow(A, interpolation="nearest", vmin=0, vmax=1)
axs[0].axis("tight")
axs[0].set_title("Model")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation="nearest", vmin=0, vmax=1)
axs[1].axis("tight")
axs[1].set_title("Data")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
| 2,681 | 30.186047 | 89 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.