Upload 3 files

- modules/__init__.py +1 -0
- modules/sampler.py +101 -0
- modules/uni_pc.py +863 -0
modules/__init__.py
ADDED
@@ -0,0 +1 @@
from .sampler import UniPCSampler  # noqa: F401

modules/sampler.py
ADDED
@@ -0,0 +1,101 @@
"""SAMPLING ONLY."""

import torch

from .uni_pc import NoiseScheduleVP, model_wrapper, UniPC
from modules import shared, devices


class UniPCSampler(object):
    def __init__(self, model, **kwargs):
        super().__init__()
        self.model = model
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
        self.before_sample = None
        self.after_sample = None
        self.after_update = None  # fix: initialize so sample() works even if set_hooks() was never called
        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))

    def register_buffer(self, name, attr):
        if type(attr) == torch.Tensor:
            if attr.device != devices.device:
                attr = attr.to(devices.device)
        setattr(self, name, attr)

    def set_hooks(self, before_sample, after_sample, after_update):
        self.before_sample = before_sample
        self.after_sample = after_sample
        self.after_update = after_update

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
               **kwargs
               ):
        if conditioning is not None:
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list):
                    ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

            elif isinstance(conditioning, list):
                for ctmp in conditioning:
                    if ctmp.shape[0] != batch_size:
                        # fix: report the actual mismatched size (the original referenced
                        # an undefined `cbs` here, which would raise a NameError)
                        print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")

            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        # print(f'Data shape for UniPC sampling is {size}')

        device = self.model.betas.device
        if x_T is None:
            img = torch.randn(size, device=device)
        else:
            img = x_T

        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)

        # SD 1.x models predict noise ("noise"); SD 2.x v-prediction models predict velocity ("v")
        model_type = "v" if self.model.parameterization == "v" else "noise"

        model_fn = model_wrapper(
            lambda x, t, c: self.model.apply_model(x, t, c),
            ns,
            model_type=model_type,
            guidance_type="classifier-free",
            #condition=conditioning,
            #unconditional_condition=unconditional_conditioning,
            guidance_scale=unconditional_guidance_scale,
        )

        uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant, condition=conditioning, unconditional_condition=unconditional_conditioning, before_sample=self.before_sample, after_sample=self.after_sample, after_update=self.after_update)
        x = uni_pc.sample(img, steps=S, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)

        return x.to(device), None
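A minimal usage sketch (an editor's illustration, not part of this upload), assuming the webui context this diff targets: `model` is a loaded checkpoint exposing `alphas_cumprod`, `betas`, `parameterization` and `apply_model`, and `cond`/`uncond` are the usual prompt and negative-prompt conditionings; none of these names are defined in this diff.

sampler = UniPCSampler(model)
sampler.set_hooks(before_sample=None, after_sample=None, after_update=None)  # hooks are optional callbacks
samples, _ = sampler.sample(
    S=20,                          # number of UniPC steps
    batch_size=1,
    shape=(4, 64, 64),             # latent (C, H, W) for a 512x512 image
    conditioning=cond,
    unconditional_conditioning=uncond,
    unconditional_guidance_scale=7.0,
)
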
modules/uni_pc.py
ADDED
@@ -0,0 +1,863 @@
import torch
import math
import tqdm


class NoiseScheduleVP:
    def __init__(
        self,
        schedule='discrete',
        betas=None,
        alphas_cumprod=None,
        continuous_beta_0=0.1,
        continuous_beta_1=20.,
    ):
        """Create a wrapper class for the forward SDE (VP type).

        ***
        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
                We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
        ***

        The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
        We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
        Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:

            log_alpha_t = self.marginal_log_mean_coeff(t)
            sigma_t = self.marginal_std(t)
            lambda_t = self.marginal_lambda(t)

        Moreover, as lambda(t) is an invertible function, we also support its inverse function:

            t = self.inverse_lambda(lambda_t)

        ===============================================================

        We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).

        1. For discrete-time DPMs:

            For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
                t_i = (i + 1) / N
            e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
            We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.

            Args:
                betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
                alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)

            Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.

            **Important**: Please pay special attention to the argument `alphas_cumprod`:
                The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
                    q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
                Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
                    alpha_{t_n} = \sqrt{\hat{alpha_n}},
                and
                    log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).

        2. For continuous-time DPMs:

            We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
            schedule are the default settings in DDPM and improved-DDPM:

            Args:
                beta_min: A `float` number. The smallest beta for the linear schedule.
                beta_max: A `float` number. The largest beta for the linear schedule.
                cosine_s: A `float` number. The hyperparameter in the cosine schedule.
                cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
                T: A `float` number. The ending time of the forward process.

        ===============================================================

        Args:
            schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
                    'linear' or 'cosine' for continuous-time DPMs.
        Returns:
            A wrapper object of the forward SDE (VP type).

        ===============================================================

        Example:

        # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
        >>> ns = NoiseScheduleVP('discrete', betas=betas)

        # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)

        # For continuous-time DPMs (VPSDE), linear schedule:
        >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)

        """

        if schedule not in ['discrete', 'linear', 'cosine']:
            raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear' or 'cosine'")

        self.schedule = schedule
        if schedule == 'discrete':
            if betas is not None:
                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
            else:
                assert alphas_cumprod is not None
                log_alphas = 0.5 * torch.log(alphas_cumprod)
            self.total_N = len(log_alphas)
            self.T = 1.
            self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
            self.log_alpha_array = log_alphas.reshape((1, -1,))
        else:
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1
            self.cosine_s = 0.008
            self.cosine_beta_max = 999.
            self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
            self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
            self.schedule = schedule
            if schedule == 'cosine':
                # For the cosine schedule, T = 1 will have numerical issues, so we manually set the ending time T.
                # Note that T = 0.9946 may not be the optimal setting; however, we find it works well.
                self.T = 0.9946
            else:
                self.T = 1.

    def marginal_log_mean_coeff(self, t):
        """
        Compute log(alpha_t) of a given continuous-time label t in [0, T].
        """
        if self.schedule == 'discrete':
            return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
        elif self.schedule == 'linear':
            return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
        elif self.schedule == 'cosine':
            log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
            log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
            return log_alpha_t

    def marginal_alpha(self, t):
        """
        Compute alpha_t of a given continuous-time label t in [0, T].
        """
        return torch.exp(self.marginal_log_mean_coeff(t))

    def marginal_std(self, t):
        """
        Compute sigma_t of a given continuous-time label t in [0, T].
        """
        return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))

    def marginal_lambda(self, t):
        """
        Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
        """
        log_mean_coeff = self.marginal_log_mean_coeff(t)
        log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
        return log_mean_coeff - log_std

    def inverse_lambda(self, lamb):
        """
        Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
        """
        if self.schedule == 'linear':
            tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            Delta = self.beta_0**2 + tmp
            return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
        elif self.schedule == 'discrete':
            log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
            t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
            return t.reshape((-1,))
        else:
            log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
            t = t_fn(log_alpha)
            return t

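# Editor's sketch (not part of the upstream file): a quick self-check that
# marginal_lambda and inverse_lambda are mutual inverses on a discrete schedule.
# The linear-beta values below are illustrative stand-ins for a real DDPM schedule.
def _demo_lambda_roundtrip():
    betas = torch.linspace(1e-4, 2e-2, 1000)
    ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1. - betas, dim=0))
    t = torch.tensor([0.1, 0.5, 0.9])
    t_back = ns.inverse_lambda(ns.marginal_lambda(t))
    assert torch.allclose(t, t_back, atol=1e-4), (t, t_back)
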
def model_wrapper(
    model,
    noise_schedule,
    model_type="noise",
    model_kwargs=None,
    guidance_type="uncond",
    #condition=None,
    #unconditional_condition=None,
    guidance_scale=1.,
    classifier_fn=None,
    classifier_kwargs=None,
):
    """Create a wrapper function for the noise prediction model.

    DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
    first wrap the model function to a noise prediction model that accepts the continuous time as the input.

    We support four types of diffusion model by setting `model_type`:

        1. "noise": noise prediction model. (Trained by predicting noise).

        2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).

        3. "v": velocity prediction model. (Trained by predicting the velocity).
            The "v" parameterization is derived in Appendix D of [1], and is used in Imagen-Video [2].

            [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
                arXiv preprint arXiv:2202.00512 (2022).
            [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
                arXiv preprint arXiv:2210.02303 (2022).

        4. "score": marginal score function. (Trained by denoising score matching).
            Note that the score function and the noise prediction model follow a simple relationship:
            ```
                noise(x_t, t) = -sigma_t * score(x_t, t)
            ```

    We support three types of guided sampling by DPMs by setting `guidance_type`:
        1. "uncond": unconditional sampling by DPMs.
            The input `model` has the following format:
            ``
                model(x, t_input, **model_kwargs) -> noise | x_start | v | score
            ``

        2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
            The input `model` has the following format:
            ``
                model(x, t_input, **model_kwargs) -> noise | x_start | v | score
            ``

            The input `classifier_fn` has the following format:
            ``
                classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
            ``

            [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
                in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.

        3. "classifier-free": classifier-free guidance sampling [4] by conditional DPMs.
            The input `model` has the following format:
            ``
                model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
            ``
            And if cond == `unconditional_condition`, the model output is the unconditional DPM output.

            [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
                arXiv preprint arXiv:2207.12598 (2022).


    The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
    or continuous-time labels (i.e. epsilon to T).

    We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
    ``
        def model_fn(x, t_continuous) -> noise:
            t_input = get_model_input_time(t_continuous)
            return noise_pred(model, x, t_input, **model_kwargs)
    ``
    where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.

    ===============================================================

    Args:
        model: A diffusion model with the corresponding format described above.
        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
        model_type: A `str`. The parameterization type of the diffusion model.
                    "noise" or "x_start" or "v" or "score".
        model_kwargs: A `dict`. A dict for the other inputs of the model function.
        guidance_type: A `str`. The type of the guidance for sampling.
                    "uncond" or "classifier" or "classifier-free".
        condition: A pytorch tensor. The condition for the guided sampling.
                    Only used for "classifier" or "classifier-free" guidance type.
                    (Commented out of the signature in this port; conditions are instead passed to `model_fn` at call time.)
        unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
                    Only used for "classifier-free" guidance type.
                    (Likewise passed to `model_fn` at call time in this port.)
        guidance_scale: A `float`. The scale for the guided sampling.
        classifier_fn: A classifier function. Only used for the classifier guidance.
        classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
    Returns:
        A noise prediction model that accepts the noised data and the continuous time as the inputs.
    """

    model_kwargs = model_kwargs or {}
    classifier_kwargs = classifier_kwargs or {}

    def get_model_input_time(t_continuous):
        """
        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
        For continuous-time DPMs, we just use `t_continuous`.
        """
        if noise_schedule.schedule == 'discrete':
            return (t_continuous - 1. / noise_schedule.total_N) * 1000.
        else:
            return t_continuous

    def noise_pred_fn(x, t_continuous, cond=None):
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, None, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
        elif model_type == "score":
            sigma_t = noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return -expand_dims(sigma_t, dims) * output

    def cond_grad_fn(x, t_input, condition):
        """
        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
        """
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]

    def model_fn(x, t_continuous, condition, unconditional_condition):
        """
        The noise prediction model function that is used for DPM-Solver.
        """
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input, condition)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
        elif guidance_type == "classifier-free":
            if guidance_scale == 1. or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                if isinstance(condition, dict):
                    assert isinstance(unconditional_condition, dict)
                    c_in = {}
                    for k in condition:
                        if isinstance(condition[k], list):
                            c_in[k] = [torch.cat([
                                unconditional_condition[k][i],
                                condition[k][i]]) for i in range(len(condition[k]))]
                        else:
                            c_in[k] = torch.cat([
                                unconditional_condition[k],
                                condition[k]])
                elif isinstance(condition, list):
                    c_in = []
                    assert isinstance(unconditional_condition, list)
                    for i in range(len(condition)):
                        c_in.append(torch.cat([unconditional_condition[i], condition[i]]))
                else:
                    c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    # NB: "score" is implemented by noise_pred_fn above but is not accepted by this
    # assert; the sampler in this diff only passes "noise" or "v".
    assert model_type in ["noise", "x_start", "v"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]
    return model_fn

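# Editor's sketch (not part of the upstream file): wiring a trivial constant
# "noise" model through model_wrapper to exercise the classifier-free guidance
# path; the toy tensors below stand in for a real UNet and prompt embeddings.
def _demo_model_wrapper():
    betas = torch.linspace(1e-4, 2e-2, 1000)
    ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1. - betas, dim=0))
    toy_model = lambda x, t, cond: torch.zeros_like(x)  # ignores t and cond
    model_fn = model_wrapper(toy_model, ns, model_type="noise",
                             guidance_type="classifier-free", guidance_scale=7.0)
    x = torch.randn(2, 4, 8, 8)
    cond, uncond = torch.zeros(2, 77, 768), torch.ones(2, 77, 768)
    eps = model_fn(x, torch.tensor(0.5), cond, uncond)
    assert eps.shape == x.shape
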
class UniPC:
    def __init__(
        self,
        model_fn,
        noise_schedule,
        predict_x0=True,
        thresholding=False,
        max_val=1.,
        variant='bh1',
        condition=None,
        unconditional_condition=None,
        before_sample=None,
        after_sample=None,
        after_update=None
    ):
        """Construct a UniPC.

        We support both data_prediction and noise_prediction.
        """
        self.model_fn_ = model_fn
        self.noise_schedule = noise_schedule
        self.variant = variant
        self.predict_x0 = predict_x0
        self.thresholding = thresholding
        self.max_val = max_val
        self.condition = condition
        self.unconditional_condition = unconditional_condition
        self.before_sample = before_sample
        self.after_sample = after_sample
        self.after_update = after_update

    def dynamic_thresholding_fn(self, x0, t=None):
        """
        The dynamic thresholding method.
        """
        dims = x0.dim()
        p = self.dynamic_thresholding_ratio
        s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
        s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
        x0 = torch.clamp(x0, -s, s) / s
        return x0

    def model(self, x, t):
        cond = self.condition
        uncond = self.unconditional_condition
        if self.before_sample is not None:
            x, t, cond, uncond = self.before_sample(x, t, cond, uncond)
        res = self.model_fn_(x, t, cond, uncond)
        if self.after_sample is not None:
            x, t, cond, uncond, res = self.after_sample(x, t, cond, uncond, res)

        if isinstance(res, tuple):
            # (None, pred_x0)
            res = res[1]

        return res

    def noise_prediction_fn(self, x, t):
        """
        Return the noise prediction model.
        """
        return self.model(x, t)

    def data_prediction_fn(self, x, t):
        """
        Return the data prediction model (with thresholding).
        """
        noise = self.noise_prediction_fn(x, t)
        dims = x.dim()
        alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
        x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
        if self.thresholding:
            p = 0.995  # A hyperparameter in the paper of "Imagen" [1].
            s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
            s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
            x0 = torch.clamp(x0, -s, s) / s
        return x0

    def model_fn(self, x, t):
        """
        Convert the model to the noise prediction model or the data prediction model.
        """
        if self.predict_x0:
            return self.data_prediction_fn(x, t)
        else:
            return self.noise_prediction_fn(x, t)

    def get_time_steps(self, skip_type, t_T, t_0, N, device):
        """Compute the intermediate time steps for sampling.
        """
        if skip_type == 'logSNR':
            lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
            lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
            logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
            return self.noise_schedule.inverse_lambda(logSNR_steps)
        elif skip_type == 'time_uniform':
            return torch.linspace(t_T, t_0, N + 1).to(device)
        elif skip_type == 'time_quadratic':
            t_order = 2
            t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
            return t
        else:
            raise ValueError(f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'")

    def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
        """
        Get the order of each step for sampling by the singlestep DPM-Solver.
        """
        if order == 3:
            K = steps // 3 + 1
            if steps % 3 == 0:
                orders = [3,] * (K - 2) + [2, 1]
            elif steps % 3 == 1:
                orders = [3,] * (K - 1) + [1]
            else:
                orders = [3,] * (K - 1) + [2]
        elif order == 2:
            if steps % 2 == 0:
                K = steps // 2
                orders = [2,] * K
            else:
                K = steps // 2 + 1
                orders = [2,] * (K - 1) + [1]
        elif order == 1:
            K = steps
            orders = [1,] * steps
        else:
            raise ValueError("'order' must be '1' or '2' or '3'.")
        if skip_type == 'logSNR':
            # To reproduce the results in DPM-Solver paper
            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
        else:
            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
        return timesteps_outer, orders

    def denoise_to_zero_fn(self, x, s):
        """
        Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
        """
        return self.data_prediction_fn(x, s)

    def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **kwargs):
        if len(t.shape) == 0:
            t = t.view(-1)
        if 'bh' in self.variant:
            return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
        else:
            assert self.variant == 'vary_coeff'
            return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)

    def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
        #print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
        ns = self.noise_schedule
        assert order <= len(model_prev_list)

        # first compute rks
        t_prev_0 = t_prev_list[-1]
        lambda_prev_0 = ns.marginal_lambda(t_prev_0)
        lambda_t = ns.marginal_lambda(t)
        model_prev_0 = model_prev_list[-1]
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        log_alpha_t = ns.marginal_log_mean_coeff(t)
        alpha_t = torch.exp(log_alpha_t)

        h = lambda_t - lambda_prev_0

        rks = []
        D1s = []
        for i in range(1, order):
            t_prev_i = t_prev_list[-(i + 1)]
            model_prev_i = model_prev_list[-(i + 1)]
            lambda_prev_i = ns.marginal_lambda(t_prev_i)
            rk = (lambda_prev_i - lambda_prev_0) / h
            rks.append(rk)
            D1s.append((model_prev_i - model_prev_0) / rk)

        rks.append(1.)
        rks = torch.tensor(rks, device=x.device)

        K = len(rks)
        # build C matrix
        C = []

        col = torch.ones_like(rks)
        for k in range(1, K + 1):
            C.append(col)
            col = col * rks / (k + 1)
        C = torch.stack(C, dim=1)

        if len(D1s) > 0:
            D1s = torch.stack(D1s, dim=1)  # (B, K, C, H, W)
            C_inv_p = torch.linalg.inv(C[:-1, :-1])
            A_p = C_inv_p

        if use_corrector:
            #print('using corrector')
            C_inv = torch.linalg.inv(C)
            A_c = C_inv

        hh = -h if self.predict_x0 else h
        h_phi_1 = torch.expm1(hh)
        h_phi_ks = []
        factorial_k = 1
        h_phi_k = h_phi_1
        for k in range(1, K + 2):
            h_phi_ks.append(h_phi_k)
            h_phi_k = h_phi_k / hh - 1 / factorial_k
            factorial_k *= (k + 1)

        model_t = None
        if self.predict_x0:
            x_t_ = (
                sigma_t / sigma_prev_0 * x
                - alpha_t * h_phi_1 * model_prev_0
            )
            # now predictor
            x_t = x_t_
            if len(D1s) > 0:
                # compute the residuals for predictor
                for k in range(K - 1):
                    x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
            # now corrector
            if use_corrector:
                model_t = self.model_fn(x_t, t)
                D1_t = (model_t - model_prev_0)
                x_t = x_t_
                k = 0  # keeps A_c[k] valid below when K == 1 and the loop body never runs
                for k in range(K - 1):
                    x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
                x_t = x_t - alpha_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
        else:
            log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
            x_t_ = (
                (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
                - (sigma_t * h_phi_1) * model_prev_0
            )
            # now predictor
            x_t = x_t_
            if len(D1s) > 0:
                # compute the residuals for predictor
                for k in range(K - 1):
                    x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
            # now corrector
            if use_corrector:
                model_t = self.model_fn(x_t, t)
                D1_t = (model_t - model_prev_0)
                x_t = x_t_
                k = 0  # keeps A_c[k] valid below when K == 1 and the loop body never runs
                for k in range(K - 1):
                    x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
                x_t = x_t - sigma_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
        return x_t, model_t

    def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True):
        #print(f'using unified predictor-corrector with order {order} (solver type: B(h))')
        ns = self.noise_schedule
        assert order <= len(model_prev_list)
        dims = x.dim()

        # first compute rks
        t_prev_0 = t_prev_list[-1]
        lambda_prev_0 = ns.marginal_lambda(t_prev_0)
        lambda_t = ns.marginal_lambda(t)
        model_prev_0 = model_prev_list[-1]
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        alpha_t = torch.exp(log_alpha_t)

        h = lambda_t - lambda_prev_0

        rks = []
        D1s = []
        for i in range(1, order):
            t_prev_i = t_prev_list[-(i + 1)]
            model_prev_i = model_prev_list[-(i + 1)]
            lambda_prev_i = ns.marginal_lambda(t_prev_i)
            rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
            rks.append(rk)
            D1s.append((model_prev_i - model_prev_0) / rk)

        rks.append(1.)
        rks = torch.tensor(rks, device=x.device)

        R = []
        b = []

        hh = -h[0] if self.predict_x0 else h[0]
        h_phi_1 = torch.expm1(hh)  # h\phi_1(h) = e^h - 1
        h_phi_k = h_phi_1 / hh - 1

        factorial_i = 1

        if self.variant == 'bh1':
            B_h = hh
        elif self.variant == 'bh2':
            B_h = torch.expm1(hh)
        else:
            raise NotImplementedError()

        for i in range(1, order + 1):
            R.append(torch.pow(rks, i - 1))
            b.append(h_phi_k * factorial_i / B_h)
            factorial_i *= (i + 1)
            h_phi_k = h_phi_k / hh - 1 / factorial_i

        R = torch.stack(R)
        b = torch.tensor(b, device=x.device)

        # now predictor
        use_predictor = len(D1s) > 0 and x_t is None
        if len(D1s) > 0:
            D1s = torch.stack(D1s, dim=1)  # (B, K, C, H, W)
            if x_t is None:
                # for order 2, we use a simplified version
                if order == 2:
                    rhos_p = torch.tensor([0.5], device=b.device)
                else:
                    rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
        else:
            D1s = None

        if use_corrector:
            #print('using corrector')
            # for order 1, we use a simplified version
            if order == 1:
                rhos_c = torch.tensor([0.5], device=b.device)
            else:
                rhos_c = torch.linalg.solve(R, b)

        model_t = None
        if self.predict_x0:
            x_t_ = (
                expand_dims(sigma_t / sigma_prev_0, dims) * x
                - expand_dims(alpha_t * h_phi_1, dims) * model_prev_0
            )

            if x_t is None:
                if use_predictor:
                    pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
                else:
                    pred_res = 0
                x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res

            if use_corrector:
                model_t = self.model_fn(x_t, t)
                if D1s is not None:
                    corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
                else:
                    corr_res = 0
                D1_t = (model_t - model_prev_0)
                x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
        else:
            x_t_ = (
                expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
            )
            if x_t is None:
                if use_predictor:
                    pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
                else:
                    pred_res = 0
                x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * pred_res

            if use_corrector:
                model_t = self.model_fn(x_t, t)
                if D1s is not None:
                    corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
                else:
                    corr_res = 0
                D1_t = (model_t - model_prev_0)
                x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
        return x_t, model_t

    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
        method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
        atol=0.0078, rtol=0.05, corrector=False,
    ):
        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'multistep':
            assert steps >= order, "UniPC order must be <= number of sampling steps"
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            #print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}")
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]
                t_prev_list = [vec_t]
                with tqdm.tqdm(total=steps) as pbar:
                    # Init the first `order` values by lower order multistep DPM-Solver.
                    for init_order in range(1, order):
                        vec_t = timesteps[init_order].expand(x.shape[0])
                        x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, init_order, use_corrector=True)
                        if model_x is None:
                            model_x = self.model_fn(x, vec_t)
                        if self.after_update is not None:
                            self.after_update(x, model_x)
                        model_prev_list.append(model_x)
                        t_prev_list.append(vec_t)
                        pbar.update()

                    for step in range(order, steps + 1):
                        vec_t = timesteps[step].expand(x.shape[0])
                        if lower_order_final:
                            step_order = min(order, steps + 1 - step)
                        else:
                            step_order = order
                        #print('this step order:', step_order)
                        if step == steps:
                            #print('do not run corrector at the last step')
                            use_corrector = False
                        else:
                            use_corrector = True
                        x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector)
                        if self.after_update is not None:
                            self.after_update(x, model_x)
                        for i in range(order - 1):
                            t_prev_list[i] = t_prev_list[i + 1]
                            model_prev_list[i] = model_prev_list[i + 1]
                        t_prev_list[-1] = vec_t
                        # We do not need to evaluate the final model value.
                        if step < steps:
                            if model_x is None:
                                model_x = self.model_fn(x, vec_t)
                            model_prev_list[-1] = model_x
                        pbar.update()
        else:
            raise NotImplementedError()
        if denoise_to_zero:
            x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
        return x

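# Editor's sketches (not part of the upstream file) for the coefficient systems above.
# 1) vary_coeff: the C matrix in multistep_uni_pc_vary_update has columns
#    C[:, k] = rks**k / (k + 1)!, the Taylor-like coefficients the solver inverts.
def _demo_vary_coeff_C():
    rks = torch.tensor([0.5, 1.0])
    cols, col = [], torch.ones_like(rks)
    for k in range(1, 3):
        cols.append(col)
        col = col * rks / (k + 1)
    C = torch.stack(cols, dim=1)
    assert torch.allclose(C, torch.stack([rks**0 / 1., rks**1 / 2.], dim=1))

# 2) bh1: the hard-coded order-2 predictor weight 0.5 is the small-h limit of the
#    B(h) system: b[0] = (expm1(hh) / hh - 1) / hh -> 1/2 as hh -> 0, and the
#    reduced matrix R[:-1, :-1] is then the 1x1 identity.
def _demo_order2_predictor_weight():
    hh = torch.tensor(1e-4, dtype=torch.float64)
    b0 = (torch.expm1(hh) / hh - 1) / hh  # factorial_i == 1 and B_h == hh for 'bh1'
    assert abs(b0.item() - 0.5) < 1e-4
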
#############################################################
# other utility functions
#############################################################

def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)

    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand

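# Editor's sketch (not part of the upstream file): interpolate_fn returns exact
# segment midpoint averages inside the keypoint range and extrapolates linearly
# from the outermost segment beyond it.
def _demo_interpolate_fn():
    xp = torch.tensor([[0., 1., 2.]])        # [C=1, K=3] keypoints
    yp = torch.tensor([[0., 10., 40.]])
    x = torch.tensor([[0.5], [1.5], [3.0]])  # [N=3, C=1] queries (last one out of range)
    y = interpolate_fn(x, xp, yp)
    assert torch.allclose(y, torch.tensor([[5.], [25.], [70.]]))
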

def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dimension `dims`.

    Args:
        `v`: a PyTorch tensor with shape [N].
        `dims`: an `int`.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] whose total dimension is `dims`.
    """
    return v[(...,) + (None,)*(dims - 1)]
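
# Editor's sketch (not part of the upstream file): expand_dims pads trailing
# singleton axes so a per-sample scalar broadcasts over image-shaped tensors.
def _demo_expand_dims():
    sigma_t = torch.rand(8)                               # one value per batch element
    assert expand_dims(sigma_t, 4).shape == (8, 1, 1, 1)  # broadcasts over [8, C, H, W]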