import torch
import torch.nn as nn

from model_jit import JiT_models


class Denoiser(nn.Module):
    def __init__(self, args):
        super().__init__()
        # backbone network; predicts the clean image x from a noisy input
        self.net = JiT_models[args.model](
            input_size=args.img_size,
            in_channels=3,
            num_classes=args.class_num,
            attn_drop=args.attn_dropout,
            proj_drop=args.proj_dropout,
        )
        self.img_size = args.img_size
        self.num_classes = args.class_num
        self.label_drop_prob = args.label_drop_prob
        self.P_mean = args.P_mean
        self.P_std = args.P_std
        self.t_eps = args.t_eps
        self.noise_scale = args.noise_scale
        # EMA decay rates; the parameter lists are expected to be populated
        # externally before update_ema() is first called
        self.ema_decay1 = args.ema_decay1
        self.ema_decay2 = args.ema_decay2
        self.ema_params1 = None
        self.ema_params2 = None
        # generation hyper-parameters
        self.method = args.sampling_method
        self.steps = args.num_sampling_steps
        self.cfg_scale = args.cfg
        self.cfg_interval = (args.interval_min, args.interval_max)
        # autoguidance: optionally guide with a smaller frozen network
        # instead of the unconditional branch of the main network
        self.ag_net = None
        if args.autoguidance_ckpt != "":
            self.ag_net = JiT_models['JiT-S/16'](
                input_size=args.img_size,
                in_channels=3,
                num_classes=args.class_num,
                attn_drop=args.attn_dropout,
                proj_drop=args.proj_dropout,
            )
            # only the flag is checked here; loading the checkpoint weights
            # is assumed to happen outside this module
            self.ag_net.requires_grad_(False)

    def drop_labels(self, labels):
        # randomly replace labels with the null class (index num_classes) so
        # the network also learns the unconditional distribution (for CFG)
        drop = torch.rand(labels.shape[0], device=labels.device) < self.label_drop_prob
        return torch.where(drop, torch.full_like(labels, self.num_classes), labels)

    def sample_t(self, n: int, device=None):
        # logit-normal time sampling: t = sigmoid(N(P_mean, P_std^2)),
        # concentrating training on intermediate noise levels
        z = torch.randn(n, device=device) * self.P_std + self.P_mean
        return torch.sigmoid(z)

    def forward(self, x, labels):
        labels_dropped = self.drop_labels(labels) if self.training else labels
        # per-example timestep, broadcast to the image dimensions
        t = self.sample_t(x.size(0), device=x.device).view(-1, *([1] * (x.ndim - 1)))
        # linear interpolation between noise e (t=0) and data x (t=1)
        e = torch.randn_like(x) * self.noise_scale
        z = t * x + (1 - t) * e
        # velocity target and prediction, both converted from x-space
        v = (x - z) / (1 - t).clamp_min(self.t_eps)
        x_pred = self.net(z, t.flatten(), labels_dropped)
        v_pred = (x_pred - z) / (1 - t).clamp_min(self.t_eps)
        # mean squared error on the velocity
        loss = ((v - v_pred) ** 2).mean(dim=(1, 2, 3)).mean()
        return loss, {}
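
    # Derivation note: with z = t * x + (1 - t) * e, the flow velocity along
    # the interpolation path is dz/dt = x - e; eliminating e via
    # e = (z - t * x) / (1 - t) gives v = (x - z) / (1 - t). This is why both
    # the target v and the prediction v_pred above are obtained from an
    # x-estimate with the same (x - z) / (1 - t) conversion.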

    @torch.no_grad()
    def generate(self, labels):
        device = labels.device
        bsz = labels.size(0)
        # start from pure noise at t=0 and integrate the ODE to t=1
        z = self.noise_scale * torch.randn(bsz, 3, self.img_size, self.img_size, device=device)
        timesteps = torch.linspace(0.0, 1.0, self.steps + 1, device=device)
        timesteps = timesteps.view(-1, *([1] * z.ndim)).expand(-1, bsz, -1, -1, -1)
        if self.method == "euler":
            stepper = self._euler_step
        elif self.method == "heun":
            stepper = self._heun_step
        else:
            raise NotImplementedError(f"unknown sampling method: {self.method}")
        # integrate all but the last interval with the chosen stepper
        for i in range(self.steps - 1):
            z = stepper(z, timesteps[i], timesteps[i + 1], labels)
        # final interval with Euler: Heun would need a model evaluation at
        # t=1, where the 1 / (1 - t) conversion degenerates
        z = self._euler_step(z, timesteps[-2], timesteps[-1], labels)
        return z

    @torch.no_grad()
    def _forward_sample(self, z, t, labels):
        # conditional branch
        x_cond = self.net(z, t.flatten(), labels)
        v_cond = (x_cond - z) / (1.0 - t).clamp_min(self.t_eps)
        # unconditional branch: the frozen autoguidance net if available,
        # otherwise the main net with the null class label
        if self.ag_net is not None:
            x_uncond = self.ag_net(z, t.flatten(), torch.full_like(labels, self.num_classes))
        else:
            x_uncond = self.net(z, t.flatten(), torch.full_like(labels, self.num_classes))
        v_uncond = (x_uncond - z) / (1.0 - t).clamp_min(self.t_eps)
        # apply the CFG scale only inside the configured t-interval; outside
        # it the scale falls back to 1 (pure conditional velocity)
        low, high = self.cfg_interval
        interval_mask = (t < high) & ((t > low) | (low == 0))
        cfg_scale_interval = torch.where(interval_mask, self.cfg_scale, 1.0)
        return v_uncond + cfg_scale_interval * (v_cond - v_uncond)

    @torch.no_grad()
    def _euler_step(self, z, t, t_next, labels):
        v_pred = self._forward_sample(z, t, labels)
        return z + (t_next - t) * v_pred

    @torch.no_grad()
    def _heun_step(self, z, t, t_next, labels):
        # Heun's method: average the velocities at t and at the Euler
        # estimate for t_next (second-order accurate)
        v_pred_t = self._forward_sample(z, t, labels)
        z_next_euler = z + (t_next - t) * v_pred_t
        v_pred_t_next = self._forward_sample(z_next_euler, t_next, labels)
        return z + (t_next - t) * 0.5 * (v_pred_t + v_pred_t_next)

    @torch.no_grad()
    def update_ema(self):
        # maintain two EMA copies of the parameters with different decays;
        # assumes ema_params1/ema_params2 were initialized before the first call
        source_params = list(self.parameters())
        for targ, src in zip(self.ema_params1, source_params):
            targ.detach().mul_(self.ema_decay1).add_(src, alpha=1 - self.ema_decay1)
        for targ, src in zip(self.ema_params2, source_params):
            targ.detach().mul_(self.ema_decay2).add_(src, alpha=1 - self.ema_decay2)
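

# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original training pipeline):
# one way to construct the module, initialize the EMA parameter lists that
# update_ema() expects, and run a training step followed by sampling. All
# concrete values below are illustrative assumptions, not the repo's actual
# configuration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import argparse

    args = argparse.Namespace(
        model="JiT-S/16", img_size=32, class_num=10,
        attn_dropout=0.0, proj_dropout=0.0,
        label_drop_prob=0.1, P_mean=-0.4, P_std=1.0,
        t_eps=1e-3, noise_scale=1.0,
        ema_decay1=0.999, ema_decay2=0.9999,
        sampling_method="heun", num_sampling_steps=50,
        cfg=1.5, interval_min=0.0, interval_max=1.0,
        autoguidance_ckpt="",
    )
    model = Denoiser(args)
    # EMA lists start as detached copies of the live parameters
    model.ema_params1 = [p.detach().clone() for p in model.parameters()]
    model.ema_params2 = [p.detach().clone() for p in model.parameters()]

    x = torch.randn(4, 3, args.img_size, args.img_size)
    labels = torch.randint(0, args.class_num, (4,))
    loss, _ = model(x, labels)
    loss.backward()
    model.update_ema()

    model.eval()
    samples = model.generate(labels)
    print(f"loss={loss.item():.4f}, samples={tuple(samples.shape)}")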