BiliSakura committed
Commit 3d6b21e · verified · 1 Parent(s): 9d0b5fb

Upload folder using huggingface_hub
PixNerd-XL-16-256/conversion_metadata.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "architecture": {
+     "hidden_size": 1152,
+     "hidden_size_x": 64,
+     "in_channels": 3,
+     "nerf_mlpratio": 2,
+     "num_blocks": 30,
+     "num_classes": 1000,
+     "num_cond_blocks": 26,
+     "num_groups": 16,
+     "patch_size": 16
+   },
+   "checkpoint": "D:\\sakura-project\\PixNerd-diffusers\\raw\\imagenet256\\epoch%3D319-step%3D1600000_emainit.ckpt",
+   "source_prefix": "ema_denoiser."
+ }
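The "architecture" block mirrors, field for field, the constructor arguments of the PixNerDiT denoiser defined in modeling_pixnerd_transformer_2d.py below. A minimal sketch of that correspondence (the local import path is an assumption for illustration, not a script shipped with this upload):

    # Sketch: the "architecture" dict maps one-to-one onto PixNerDiT's kwargs.
    import json
    from modeling_pixnerd_transformer_2d import PixNerDiT  # assumed local import path

    with open("PixNerd-XL-16-256/conversion_metadata.json") as f:
        arch = json.load(f)["architecture"]
    denoiser = PixNerDiT(**arch)  # hidden_size=1152, patch_size=16, ...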
PixNerd-XL-16-256/model_index.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_class_name": "PixNerdPipeline",
+   "_diffusers_version": "0.30.0",
+   "scheduler": [
+     "diffusers_modules.local.scheduling_pixnerd_flow_match",
+     "PixNerdFlowMatchScheduler"
+   ],
+   "transformer": [
+     "diffusers_modules.local.modeling_pixnerd_transformer_2d",
+     "PixNerdTransformer2DModel"
+   ]
+ }
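model_index.json routes both components to repo-local modules (diffusers_modules.local.*), so the folder is meant to be loaded through diffusers' remote-code path. A loading sketch, assuming a local clone of this folder (the path and flags are assumptions; adjust to the actual Hub repo id):

    # Sketch: load the folder as a custom diffusers pipeline.
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "PixNerd-XL-16-256",      # local folder from this commit
        trust_remote_code=True,   # executes pipeline.py and the local modules
    )
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")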
PixNerd-XL-16-256/modeling_pixnerd_transformer_2d.py ADDED
@@ -0,0 +1,746 @@
+ from __future__ import annotations
+
+ import copy
+ import importlib
+ import math
+ from dataclasses import dataclass
+ from functools import lru_cache
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
+
+ import torch
+ import torch.nn as nn
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
+ from diffusers.models.modeling_utils import ModelMixin
+ from diffusers.utils import BaseOutput
+ from torch.nn.functional import scaled_dot_product_attention
+
+ class BaseAE(torch.nn.Module):
+     def __init__(self, scale=1.0, shift=0.0):
+         super().__init__()
+         self.scale = scale
+         self.shift = shift
+
+     def encode(self, x):
+         return self._impl_encode(x)  # .to(torch.bfloat16)
+
+     # @torch.autocast("cuda", dtype=torch.bfloat16)
+     def decode(self, x):
+         return self._impl_decode(x)  # .to(torch.bfloat16)
+
+     def _impl_encode(self, x):
+         raise NotImplementedError
+
+     def _impl_decode(self, x):
+         raise NotImplementedError
+
+ def uint82fp(x):
+     x = x.to(torch.float32)
+     x = (x - 127.5) / 127.5
+     return x
+
+ def fp2uint8(x):
+     x = torch.clip_((x + 1) * 127.5 + 0.5, 0, 255).to(torch.uint8)
+     return x
+
+
+ class PixelAE(BaseAE):
+     def __init__(self, scale=1.0, shift=0.0):
+         super().__init__(scale, shift)
+
+     def _impl_encode(self, x):
+         return x / self.scale + self.shift
+
+     def _impl_decode(self, x):
+         return (x - self.shift) * self.scale
+
+
+ def resolve_conditioner_device(metadata: dict, fallback: torch.device | None = None) -> torch.device:
+     if metadata is None:
+         metadata = {}
+     if "device" in metadata and metadata["device"] is not None:
+         return torch.device(metadata["device"])
+     if fallback is not None:
+         return fallback
+     return torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class BaseConditioner(nn.Module):
+     def __init__(self):
+         super(BaseConditioner, self).__init__()
+
+     def _impl_condition(self, y, metadata) -> torch.Tensor:
+         raise NotImplementedError()
+
+     def _impl_uncondition(self, y, metadata) -> torch.Tensor:
+         raise NotImplementedError()
+
+     @torch.no_grad()
+     def __call__(self, y, metadata: dict = {}):
+         condition = self._impl_condition(y, metadata)
+         uncondition = self._impl_uncondition(y, metadata)
+         if condition.dtype in [torch.float64, torch.float32, torch.float16]:
+             condition = condition.to(torch.bfloat16)
+         if uncondition.dtype in [torch.float64, torch.float32, torch.float16]:
+             uncondition = uncondition.to(torch.bfloat16)
+         return condition, uncondition
+
+
+ class ComposeConditioner(BaseConditioner):
+     def __init__(self, conditioners: List[BaseConditioner]):
+         super().__init__()
+         self.conditioners = conditioners
+
+     def _impl_condition(self, y, metadata):
+         condition = []
+         for conditioner in self.conditioners:
+             condition.append(conditioner._impl_condition(y, metadata))
+         condition = torch.cat(condition, dim=1)
+         return condition
+
+     def _impl_uncondition(self, y, metadata):
+         uncondition = []
+         for conditioner in self.conditioners:
+             uncondition.append(conditioner._impl_uncondition(y, metadata))
+         uncondition = torch.cat(uncondition, dim=1)
+         return uncondition
+
+
+ class LabelConditioner(BaseConditioner):
+     def __init__(self, num_classes):
+         super().__init__()
+         self.null_condition = num_classes
+
+     def _impl_condition(self, y, metadata):
+         device = resolve_conditioner_device(metadata)
+         return torch.tensor(y, device=device).long()
+
+     def _impl_uncondition(self, y, metadata):
+         device = resolve_conditioner_device(metadata)
+         return torch.full((len(y),), self.null_condition, dtype=torch.long, device=device)
+
+
+ def modulate(x, shift, scale):
+     return x * (1 + scale) + shift
+
+ class Embed(nn.Module):
+     def __init__(
+         self,
+         in_chans: int = 3,
+         embed_dim: int = 768,
+         norm_layer=None,
+         bias: bool = True,
+     ):
+         super().__init__()
+         self.in_chans = in_chans
+         self.embed_dim = embed_dim
+         self.proj = nn.Linear(in_chans, embed_dim, bias=bias)
+         self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
+     def forward(self, x):
+         x = self.proj(x)
+         x = self.norm(x)
+         return x
+
+ class TimestepEmbedder(nn.Module):
+
+     def __init__(self, hidden_size, frequency_embedding_size=256):
+         super().__init__()
+         self.mlp = nn.Sequential(
+             nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+             nn.SiLU(),
+             nn.Linear(hidden_size, hidden_size, bias=True),
+         )
+         self.frequency_embedding_size = frequency_embedding_size
+
+     @staticmethod
+     def timestep_embedding(t, dim, max_period=10):
+         half = dim // 2
+         freqs = torch.exp(
+             -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half
+         )
+         args = t[..., None].float() * freqs[None, ...]
+         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+         if dim % 2:
+             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+         return embedding
+
+     def forward(self, t):
+         t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+         t_emb = self.mlp(t_freq)
+         return t_emb
+
+ class LabelEmbedder(nn.Module):
+     def __init__(self, num_classes, hidden_size):
+         super().__init__()
+         self.embedding_table = nn.Embedding(num_classes, hidden_size)
+         self.num_classes = num_classes
+
+     def forward(self, labels):
+         embeddings = self.embedding_table(labels)
+         return embeddings
+
+ class FinalLayer(nn.Module):
+     def __init__(self, hidden_size, out_channels):
+         super().__init__()
+         self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+         self.linear = nn.Linear(hidden_size, out_channels, bias=True)
+         self.adaLN_modulation = nn.Sequential(
+             nn.Linear(hidden_size, 2 * hidden_size, bias=True)
+         )
+
+     def forward(self, x, c):
+         shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
+         x = modulate(self.norm_final(x), shift, scale)
+         x = self.linear(x)
+         return x
+
+ class RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         LlamaRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+ class FeedForward(nn.Module):
+     def __init__(
+         self,
+         dim: int,
+         hidden_dim: int,
+     ):
+         super().__init__()
+         hidden_dim = int(2 * hidden_dim / 3)
+         self.w1 = nn.Linear(dim, hidden_dim, bias=False)
+         self.w3 = nn.Linear(dim, hidden_dim, bias=False)
+         self.w2 = nn.Linear(hidden_dim, dim, bias=False)
+     def forward(self, x):
+         x = self.w2(torch.nn.functional.silu(self.w1(x)) * self.w3(x))
+         return x
+
+ def precompute_freqs_cis_2d(dim: int, height: int, width: int, theta: float = 10000.0, scale=16.0):
+     # assert H * H == end
+     # flat_patch_pos = torch.linspace(-1, 1, end)  # N = end
+     x_pos = torch.linspace(0, scale, width)
+     y_pos = torch.linspace(0, scale, height)
+     y_pos, x_pos = torch.meshgrid(y_pos, x_pos, indexing="ij")
+     y_pos = y_pos.reshape(-1)
+     x_pos = x_pos.reshape(-1)
+     freqs = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))  # Hc/4
+     x_freqs = torch.outer(x_pos, freqs).float()  # N Hc/4
+     y_freqs = torch.outer(y_pos, freqs).float()  # N Hc/4
+     x_cis = torch.polar(torch.ones_like(x_freqs), x_freqs)
+     y_cis = torch.polar(torch.ones_like(y_freqs), y_freqs)
+     freqs_cis = torch.cat([x_cis.unsqueeze(dim=-1), y_cis.unsqueeze(dim=-1)], dim=-1)  # N, Hc/4, 2
+     freqs_cis = freqs_cis.reshape(height * width, -1)
+     return freqs_cis
+
+
+ def apply_rotary_emb(
+     xq: torch.Tensor,
+     xk: torch.Tensor,
+     freqs_cis: torch.Tensor,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     freqs_cis = freqs_cis[None, :, None, :]
+     # xq : B N H Hc
+     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))  # B N H Hc/2
+     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
+     xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)  # B, N, H, Hc
+     xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
+     return xq_out.type_as(xq), xk_out.type_as(xk)
+
+
+ class RAttention(nn.Module):
+     def __init__(
+         self,
+         dim: int,
+         num_heads: int = 8,
+         qkv_bias: bool = False,
+         qk_norm: bool = True,
+         attn_drop: float = 0.0,
+         proj_drop: float = 0.0,
+         norm_layer: nn.Module = RMSNorm,
+     ) -> None:
+         super().__init__()
+         assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+
+         self.dim = dim
+         self.num_heads = num_heads
+         self.head_dim = dim // num_heads
+         self.scale = self.head_dim ** -0.5
+
+         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+         self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+         self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+         self.attn_drop = nn.Dropout(attn_drop)
+         self.proj = nn.Linear(dim, dim)
+         self.proj_drop = nn.Dropout(proj_drop)
+
+     def forward(self, x: torch.Tensor, pos, mask) -> torch.Tensor:
+         B, N, C = x.shape
+         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 1, 3, 4)
+         q, k, v = qkv[0], qkv[1], qkv[2]  # B N H Hc
+         q = self.q_norm(q)
+         k = self.k_norm(k)
+         q, k = apply_rotary_emb(q, k, freqs_cis=pos)
+         q = q.view(B, -1, self.num_heads, C // self.num_heads).transpose(1, 2)  # B, H, N, Hc
+         k = k.view(B, -1, self.num_heads, C // self.num_heads).transpose(1, 2).contiguous()  # B, H, N, Hc
+         v = v.view(B, -1, self.num_heads, C // self.num_heads).transpose(1, 2).contiguous()
+
+         x = scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)
+
+         x = x.transpose(1, 2).reshape(B, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+
+
+ class FlattenDiTBlock(nn.Module):
+     def __init__(self, hidden_size, groups, mlp_ratio=4.0):
+         super().__init__()
+         self.norm1 = RMSNorm(hidden_size, eps=1e-6)
+         self.attn = RAttention(hidden_size, num_heads=groups, qkv_bias=False)
+         self.norm2 = RMSNorm(hidden_size, eps=1e-6)
+         mlp_hidden_dim = int(hidden_size * mlp_ratio)
+         self.mlp = FeedForward(hidden_size, mlp_hidden_dim)
+         self.adaLN_modulation = nn.Sequential(
+             nn.Linear(hidden_size, 6 * hidden_size, bias=True)
+         )
+
+     def forward(self, x, c, pos, mask=None):
+         shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(6, dim=-1)
+         x = x + gate_msa * self.attn(modulate(self.norm1(x), shift_msa, scale_msa), pos, mask=mask)
+         x = x + gate_mlp * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
+         return x
+
+ class NerfEmbedder(nn.Module):
+     def __init__(self, in_channels, hidden_size_input, max_freqs):
+         super().__init__()
+         self.max_freqs = max_freqs
+         self.hidden_size_input = hidden_size_input
+         self.embedder = nn.Sequential(
+             nn.Linear(in_channels + max_freqs ** 2, hidden_size_input, bias=True),
+         )
+
+     @lru_cache
+     def fetch_pos(self, patch_size, device, dtype):
+         pos_x = torch.linspace(0, 1, patch_size, device=device, dtype=dtype)
+         pos_y = torch.linspace(0, 1, patch_size, device=device, dtype=dtype)
+         pos_y, pos_x = torch.meshgrid(pos_y, pos_x, indexing="ij")
+         pos_x = pos_x.reshape(-1, 1, 1)
+         pos_y = pos_y.reshape(-1, 1, 1)
+
+         freqs = torch.linspace(0, self.max_freqs, self.max_freqs, dtype=dtype, device=device)
+         freqs_x = freqs[None, :, None]
+         freqs_y = freqs[None, None, :]
+         coeffs = (1 + freqs_x * freqs_y) ** -1
+         dct_x = torch.cos(pos_x * freqs_x * torch.pi)
+         dct_y = torch.cos(pos_y * freqs_y * torch.pi)
+         dct = (dct_x * dct_y * coeffs).view(1, -1, self.max_freqs ** 2)
+         return dct
+
+
+     def forward(self, inputs):
+         B, P2, C = inputs.shape
+         patch_size = int(P2 ** 0.5)
+         device = inputs.device
+         dtype = inputs.dtype
+         dct = self.fetch_pos(patch_size, device, dtype)
+         dct = dct.repeat(B, 1, 1)
+         inputs = torch.cat([inputs, dct], dim=-1)
+         inputs = self.embedder(inputs)
+         return inputs
+
+
+ class NerfBlock(nn.Module):
+     def __init__(self, hidden_size_s, hidden_size_x, mlp_ratio=4):
+         super().__init__()
+         self.param_generator1 = nn.Sequential(
+             nn.Linear(hidden_size_s, 2 * hidden_size_x ** 2 * mlp_ratio, bias=True),
+         )
+         self.norm = RMSNorm(hidden_size_x, eps=1e-6)
+         self.mlp_ratio = mlp_ratio
+     def forward(self, x, s):
+         batch_size, num_x, hidden_size_x = x.shape
+         mlp_params1 = self.param_generator1(s)
+         fc1_param1, fc2_param1 = mlp_params1.chunk(2, dim=-1)
+         fc1_param1 = fc1_param1.view(batch_size, hidden_size_x, hidden_size_x * self.mlp_ratio)
+         fc2_param1 = fc2_param1.view(batch_size, hidden_size_x * self.mlp_ratio, hidden_size_x)
+
+         # normalize fc1
+         normalized_fc1_param1 = torch.nn.functional.normalize(fc1_param1, dim=-2)
+         # normalize fc2
+         normalized_fc2_param1 = torch.nn.functional.normalize(fc2_param1, dim=-2)
+         # mlp 1
+         res_x = x
+         x = self.norm(x)
+         x = torch.bmm(x, normalized_fc1_param1)
+         x = torch.nn.functional.silu(x)
+         x = torch.bmm(x, normalized_fc2_param1)
+         x = x + res_x
+         return x
+
+ class NerfFinalLayer(nn.Module):
+     def __init__(self, hidden_size, out_channels):
+         super().__init__()
+         self.norm = RMSNorm(hidden_size, eps=1e-6)
+         self.linear = nn.Linear(hidden_size, out_channels, bias=True)
+     def forward(self, x):
+         x = self.norm(x)
+         x = self.linear(x)
+         return x
+
+ class PixNerDiT(nn.Module):
+     def __init__(
+         self,
+         in_channels=4,
+         num_groups=12,
+         hidden_size=1152,
+         hidden_size_x=64,
+         nerf_mlpratio=4,
+         num_blocks=18,
+         num_cond_blocks=4,
+         patch_size=2,
+         num_classes=1000,
+         learn_sigma=True,
+         deep_supervision=0,
+         weight_path=None,
+         load_ema=False,
+     ):
+         super().__init__()
+         self.deep_supervision = deep_supervision
+         self.learn_sigma = learn_sigma
+         self.in_channels = in_channels
+         self.out_channels = in_channels
+         self.hidden_size = hidden_size
+         self.num_groups = num_groups
+         self.num_blocks = num_blocks
+         self.num_cond_blocks = num_cond_blocks
+         self.patch_size = patch_size
+         self.x_embedder = NerfEmbedder(in_channels, hidden_size_x, max_freqs=8)
+         self.s_embedder = Embed(in_channels * patch_size ** 2, hidden_size, bias=True)
+         self.t_embedder = TimestepEmbedder(hidden_size)
+         self.y_embedder = LabelEmbedder(num_classes + 1, hidden_size)
+
+         self.final_layer = NerfFinalLayer(hidden_size_x, self.out_channels)
+
+         self.weight_path = weight_path
+
+         self.load_ema = load_ema
+         self.blocks = nn.ModuleList([
+             FlattenDiTBlock(self.hidden_size, self.num_groups) for _ in range(self.num_cond_blocks)
+         ])
+         self.blocks.extend([
+             NerfBlock(self.hidden_size, hidden_size_x, nerf_mlpratio) for _ in range(self.num_cond_blocks, self.num_blocks)
+         ])
+         self.initialize_weights()
+         self.precompute_pos = dict()
+
+     def fetch_pos(self, height, width, device):
+         if (height, width) in self.precompute_pos:
+             return self.precompute_pos[(height, width)].to(device)
+         else:
+             pos = precompute_freqs_cis_2d(self.hidden_size // self.num_groups, height, width).to(device)
+             self.precompute_pos[(height, width)] = pos
+             return pos
+
+     def initialize_weights(self):
+         # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+         w = self.s_embedder.proj.weight.data
+         nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+         nn.init.constant_(self.s_embedder.proj.bias, 0)
+
+         # Initialize label embedding table:
+         nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)
+
+         # Initialize timestep embedding MLP:
+         nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+         nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+         # zero init final layer
+         nn.init.zeros_(self.final_layer.linear.weight)
+         nn.init.zeros_(self.final_layer.linear.bias)
+
+
+     def forward(self, x, t, y, s=None, mask=None):
+         B, _, H, W = x.shape
+         pos = self.fetch_pos(H // self.patch_size, W // self.patch_size, x.device)
+         x = torch.nn.functional.unfold(x, kernel_size=self.patch_size, stride=self.patch_size).transpose(1, 2)
+         t = self.t_embedder(t.view(-1)).view(B, -1, self.hidden_size)
+         y = self.y_embedder(y).view(B, 1, self.hidden_size)
+         c = nn.functional.silu(t + y)
+         if s is None:
+             s = self.s_embedder(x)
+         for i in range(self.num_cond_blocks):
+             s = self.blocks[i](s, c, pos, mask)
+         s = nn.functional.silu(t + s)
+         batch_size, length, _ = s.shape
+         x = x.reshape(batch_size * length, self.in_channels, self.patch_size ** 2)
+         x = x.transpose(1, 2)
+         s = s.view(batch_size * length, self.hidden_size)
+         x = self.x_embedder(x)
+         for i in range(self.num_cond_blocks, self.num_blocks):
+             x = self.blocks[i](x, s)
+         x = self.final_layer(x)
+         x = x.transpose(1, 2)
+         x = x.reshape(batch_size, length, -1)
+         x = torch.nn.functional.fold(x.transpose(1, 2).contiguous(), (H, W), kernel_size=self.patch_size, stride=self.patch_size)
+         return x
+
+
+ def to_container(config: Any) -> Any:
+     if hasattr(config, "items") and not isinstance(config, dict):
+         return {k: to_container(v) for k, v in config.items()}
+     if isinstance(config, list):
+         return [to_container(v) for v in config]
+     return config
+
+
+ def load_symbol(path: str) -> Any:
+     module_path, name = path.rsplit(".", 1)
+     module = importlib.import_module(module_path)
+     return getattr(module, name)
+
+
+ def instantiate_from_spec(spec: Any) -> Any:
+     spec = to_container(spec)
+     if isinstance(spec, dict) and "class_path" in spec:
+         class_or_fn = load_symbol(spec["class_path"])
+         init_args = spec.get("init_args", {})
+         if isinstance(init_args, dict):
+             init_args = {k: instantiate_from_spec(v) for k, v in init_args.items()}
+         return class_or_fn(**init_args)
+     if isinstance(spec, dict):
+         return {k: instantiate_from_spec(v) for k, v in spec.items()}
+     if isinstance(spec, list):
+         return [instantiate_from_spec(v) for v in spec]
+     if isinstance(spec, str) and "." in spec:
+         try:
+             return load_symbol(spec)
+         except Exception:
+             return spec
+     return spec
+
+
+ def clone_spec(spec: Dict[str, Any]) -> Dict[str, Any]:
+     return copy.deepcopy(to_container(spec))
+
+
+ def load_prefixed_state_dict(
+     module: Optional[torch.nn.Module],
+     state_dict: Dict[str, torch.Tensor],
+     prefixes: Iterable[str],
+ ) -> bool:
+     if module is None:
+         return False
+     for prefix in prefixes:
+         subset = {
+             key[len(prefix):]: value
+             for key, value in state_dict.items()
+             if key.startswith(prefix)
+         }
+         if subset:
+             module.load_state_dict(subset, strict=False)
+             return True
+     return False
+
+
+ @dataclass
+ class PixNerdTransformer2DModelOutput(BaseOutput):
+     sample: torch.FloatTensor
+
+
+ class PixNerdTransformer2DModel(ModelMixin, ConfigMixin):
+     config_name = "config.json"
+
+     @register_to_config
+     def __init__(
+         self,
+         denoiser_spec: Dict[str, Any],
+         conditioner_spec: Dict[str, Any],
+         vae_spec: Optional[Dict[str, Any]] = None,
+         diffusion_trainer_spec: Optional[Dict[str, Any]] = None,
+         use_ema: bool = True,
+         ema_decay: float = 0.9999,
+         compile_denoiser: bool = False,
+     ) -> None:
+         super().__init__()
+         self.denoiser = instantiate_from_spec(to_container(denoiser_spec))
+         self.conditioner = instantiate_from_spec(to_container(conditioner_spec))
+         self.vae = instantiate_from_spec(to_container(vae_spec)) if vae_spec is not None else None
+         self.diffusion_trainer = (
+             instantiate_from_spec(to_container(diffusion_trainer_spec))
+             if diffusion_trainer_spec is not None
+             else None
+         )
+
+         self.use_ema = bool(use_ema)
+         self.ema_decay = float(ema_decay)
+         self.ema_denoiser = copy.deepcopy(self.denoiser) if self.use_ema else None
+         if self.ema_denoiser is not None:
+             self.ema_denoiser.to(torch.float32)
+
+         if compile_denoiser and hasattr(self.denoiser, "compile"):
+             self.denoiser.compile()
+             if self.ema_denoiser is not None:
+                 self.ema_denoiser.compile()
+
+         self._freeze_non_trainable_modules()
+         if self.ema_denoiser is not None:
+             self.sync_ema()
+
+     @property
+     def patch_size(self) -> int:
+         return int(getattr(self.denoiser, "patch_size", 1))
+
+     @property
+     def in_channels(self) -> int:
+         return int(getattr(self.denoiser, "in_channels", 3))
+
+     @classmethod
+     def from_project_config(
+         cls,
+         model_config: Dict[str, Any],
+         use_ema: bool = True,
+         compile_denoiser: bool = False,
+     ) -> "PixNerdTransformer2DModel":
+         model_config = to_container(model_config)
+         ema_decay = model_config.get("ema_tracker", {}).get("init_args", {}).get("decay", 0.9999)
+         return cls(
+             denoiser_spec=model_config["denoiser"],
+             conditioner_spec=model_config["conditioner"],
+             vae_spec=model_config.get("vae"),
+             diffusion_trainer_spec=model_config.get("diffusion_trainer"),
+             use_ema=use_ema,
+             ema_decay=ema_decay,
+             compile_denoiser=compile_denoiser,
+         )
+
+     @staticmethod
+     def _as_timestep_tensor(
+         timestep: Any,
+         batch_size: int,
+         device: torch.device,
+     ) -> torch.Tensor:
+         if isinstance(timestep, torch.Tensor):
+             if timestep.ndim == 0:
+                 return timestep.repeat(batch_size).to(device=device, dtype=torch.float32)
+             return timestep.to(device=device, dtype=torch.float32)
+         return torch.full((batch_size,), float(timestep), device=device, dtype=torch.float32)
+
+     def _freeze_module(self, module: Optional[torch.nn.Module]) -> None:
+         if module is None:
+             return
+         module.eval()
+         for parameter in module.parameters():
+             parameter.requires_grad = False
+
+     def _freeze_non_trainable_modules(self) -> None:
+         self._freeze_module(self.conditioner)
+         self._freeze_module(self.vae)
+         self._freeze_module(self.ema_denoiser)
+
+     def forward(
+         self,
+         sample: torch.Tensor,
+         timestep: Any,
+         encoder_hidden_states: torch.Tensor,
+         return_dict: bool = True,
+     ) -> PixNerdTransformer2DModelOutput | Tuple[torch.Tensor]:
+         t = self._as_timestep_tensor(timestep, sample.shape[0], sample.device)
+         out = self.denoiser(sample, t, encoder_hidden_states)
+         if not return_dict:
+             return (out,)
+         return PixNerdTransformer2DModelOutput(sample=out)
+
+     def predict_noise(
+         self,
+         sample: torch.Tensor,
+         timestep: Any,
+         encoder_hidden_states: torch.Tensor,
+         use_ema: bool = False,
+     ) -> torch.Tensor:
+         t = self._as_timestep_tensor(timestep, sample.shape[0], sample.device)
+         denoiser = self.get_inference_denoiser(use_ema=use_ema)
+         return denoiser(sample, t, encoder_hidden_states)
+
+     def get_inference_denoiser(self, use_ema: bool = True) -> torch.nn.Module:
+         if use_ema and self.ema_denoiser is not None:
+             return self.ema_denoiser
+         return self.denoiser
+
+     @torch.no_grad()
+     def get_conditioning(
+         self,
+         y: Iterable[Any],
+         metadata: Optional[Dict[str, Any]] = None,
+     ):
+         metadata = {} if metadata is None else metadata
+         return self.conditioner(y, metadata)
+
+     @torch.no_grad()
+     def encode(self, x: torch.Tensor) -> torch.Tensor:
+         if self.vae is None:
+             return x
+         return self.vae.encode(x)
+
+     @torch.no_grad()
+     def decode(self, latents: torch.Tensor) -> torch.Tensor:
+         if self.vae is None:
+             return latents
+         return self.vae.decode(latents)
+
+     @torch.no_grad()
+     def sync_ema(self) -> None:
+         if self.ema_denoiser is None:
+             return
+         self.ema_denoiser.load_state_dict(self.denoiser.state_dict(), strict=True)
+         self.ema_denoiser.to(torch.float32)
+
+     @torch.no_grad()
+     def ema_step(self, decay: Optional[float] = None) -> None:
+         if self.ema_denoiser is None:
+             return
+         decay = self.ema_decay if decay is None else float(decay)
+         for ema_param, param in zip(self.ema_denoiser.parameters(), self.denoiser.parameters()):
+             ema_param.mul_(decay).add_(param.detach().float(), alpha=1.0 - decay)
+
+     def compute_training_loss(
+         self,
+         x: torch.Tensor,
+         y: Iterable[Any],
+         scheduler: torch.nn.Module,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, torch.Tensor]:
+         if self.diffusion_trainer is None:
+             raise RuntimeError("diffusion_trainer is not configured.")
+         metadata = {} if metadata is None else metadata
+
+         with torch.no_grad():
+             x = self.encode(x)
+             condition, uncondition = self.get_conditioning(y, metadata)
+
+         return self.diffusion_trainer(
+             self.denoiser,
+             self.ema_denoiser if self.ema_denoiser is not None else self.denoiser,
+             scheduler,
+             x,
+             condition,
+             uncondition,
+             metadata,
+         )
+
+ __all__ = [
+     "PixNerDiT",
+     "LabelConditioner",
+     "PixelAE",
+     "PixNerdTransformer2DModel",
+     "PixNerdTransformer2DModelOutput",
+ ]
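One detail worth checking in the module above: RAttention's rotary table must agree with its head size. For this upload's config (hidden_size 1152, num_groups 16, so 72-dim heads) the shapes line up; a quick sketch, assuming the module's names are in scope:

    # head_dim = 1152 // 16 = 72; apply_rotary_emb views each head as 36
    # complex pairs, and precompute_freqs_cis_2d emits dim // 4 = 18
    # frequencies per axis, concatenated to 36 -- an exact match.
    head_dim = 1152 // 16
    freqs_cis = precompute_freqs_cis_2d(head_dim, 16, 16)  # 16x16 tokens: 256px / patch 16
    print(freqs_cis.shape)  # torch.Size([256, 36]), dtype complex64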
PixNerd-XL-16-256/pipeline.py ADDED
@@ -0,0 +1,184 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import List, Optional, Sequence, Union
+
+ import torch
+ from diffusers import DiffusionPipeline
+ from diffusers.image_processor import VaeImageProcessor
+ from diffusers.utils import BaseOutput
+ from PIL import Image
+
+ from .modeling_pixnerd_transformer_2d import PixNerdTransformer2DModel
+ from .scheduling_pixnerd_flow_match import PixNerdFlowMatchScheduler
+
+ ConditioningInput = Union[str, int, Sequence[Union[str, int]]]
+
+
+ @dataclass
+ class PixNerdPipelineOutput(BaseOutput):
+     images: Union[List[Image.Image], torch.Tensor, "np.ndarray"]
+
+
+ class PixNerdPipeline(DiffusionPipeline):
+     model_cpu_offload_seq = "conditioner->transformer->vae"
+     _callback_tensor_inputs = ["latents"]
+
+     def __init__(
+         self,
+         transformer,
+         scheduler: PixNerdFlowMatchScheduler,
+         vae=None,
+         conditioner=None,
+     ):
+         super().__init__()
+         if vae is None:
+             vae = getattr(transformer, "vae", None)
+         if conditioner is None:
+             conditioner = getattr(transformer, "conditioner", None)
+         if vae is None or conditioner is None:
+             raise ValueError("Pipeline requires `vae` and `conditioner` either explicitly or from `transformer`.")
+         self.register_modules(
+             vae=vae,
+             conditioner=conditioner,
+             transformer=transformer,
+             scheduler=scheduler,
+         )
+         self.image_processor = VaeImageProcessor(vae_scale_factor=1)
+
+     @staticmethod
+     def _fp_to_uint8(image: torch.Tensor) -> torch.Tensor:
+         return torch.clip_((image + 1) * 127.5 + 0.5, 0, 255).to(torch.uint8)
+
+     @staticmethod
+     def _to_list(y: ConditioningInput) -> List[Union[str, int]]:
+         if isinstance(y, (str, int)):
+             return [y]
+         return list(y)
+
+     @staticmethod
+     def _repeat(values: List[Union[str, int]], repeats: int) -> List[Union[str, int]]:
+         if repeats == 1:
+             return values
+         expanded: List[Union[str, int]] = []
+         for value in values:
+             expanded.extend([value] * repeats)
+         return expanded
+
+     def encode_prompt(
+         self,
+         prompt: ConditioningInput,
+         num_images_per_prompt: int,
+     ):
+         prompts = self._repeat(self._to_list(prompt), num_images_per_prompt)
+         metadata = {"device": self._execution_device}
+         with torch.no_grad():
+             cond, uncond = self.conditioner(prompts, metadata)
+         return cond, uncond, prompts
+
+     def prepare_latents(
+         self,
+         batch_size: int,
+         num_channels: int,
+         height: int,
+         width: int,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         if latents is not None:
+             return latents.to(device=self._execution_device, dtype=torch.float32)
+         return torch.randn(
+             (batch_size, num_channels, height, width),
+             generator=generator,
+             device=self._execution_device,
+             dtype=torch.float32,
+         )
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         prompt: ConditioningInput,
+         negative_prompt: Optional[ConditioningInput] = None,
+         num_images_per_prompt: int = 1,
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 25,
+         guidance_scale: float = 4.0,
+         generator: Optional[torch.Generator] = None,
+         seed: Optional[int] = None,
+         latents: Optional[torch.Tensor] = None,
+         output_type: str = "pil",
+         return_dict: bool = True,
+         timeshift: float = 3.0,
+         order: int = 2,
+     ) -> PixNerdPipelineOutput | tuple:
+         patch_size = int(getattr(self.transformer, "patch_size", 1))
+         channels = int(getattr(self.transformer, "in_channels", 3))
+         height = (height // patch_size) * patch_size
+         width = (width // patch_size) * patch_size
+
+         if hasattr(self.transformer, "decoder_patch_scaling_h"):
+             self.transformer.decoder_patch_scaling_h = height / 512
+             self.transformer.decoder_patch_scaling_w = width / 512
+
+         cond, default_uncond, prompts = self.encode_prompt(prompt, num_images_per_prompt)
+         if negative_prompt is not None:
+             negative = self._repeat(self._to_list(negative_prompt), num_images_per_prompt)
+             metadata = {"device": self._execution_device}
+             with torch.no_grad():
+                 _, uncond = self.conditioner(negative, metadata)
+         else:
+             uncond = default_uncond
+         batch_size = len(prompts)
+         if generator is None and seed is not None:
+             generator = torch.Generator(device=self._execution_device).manual_seed(seed)
+         latents = self.prepare_latents(
+             batch_size=batch_size,
+             num_channels=channels,
+             height=height,
+             width=width,
+             generator=generator,
+             latents=latents,
+         )
+         self.scheduler.set_timesteps(
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             timeshift=timeshift,
+             order=order,
+             device=latents.device,
+         )
+         for timestep in self.scheduler.timesteps:
+             cfg_latents = torch.cat([latents, latents], dim=0)
+             cfg_t = timestep.repeat(cfg_latents.shape[0]).to(latents.device, dtype=latents.dtype)
+             cfg_condition = torch.cat([uncond, cond], dim=0)
+             model_output = self.transformer(
+                 sample=cfg_latents,
+                 timestep=cfg_t,
+                 encoder_hidden_states=cfg_condition,
+             ).sample
+             model_output = self.scheduler.classifier_free_guidance(model_output)
+             latents = self.scheduler.step(
+                 model_output=model_output,
+                 timestep=timestep,
+                 sample=latents,
+             ).prev_sample
+
+         image = self.vae.decode(latents)
+         images_uint8 = self._fp_to_uint8(image).permute(0, 2, 3, 1).cpu().numpy()
+         if output_type == "pil":
+             output = [Image.fromarray(img) for img in images_uint8]
+         elif output_type == "pt":
+             output = torch.from_numpy(images_uint8)
+         elif output_type == "np":
+             output = images_uint8
+         else:
+             raise ValueError(f"Unsupported output_type: {output_type}")
+
+         if not return_dict:
+             return (output,)
+         return PixNerdPipelineOutput(images=output)
+
+ __all__ = [
+     "PixNerdPipeline",
+     "PixNerdPipelineOutput",
+ ]
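The pipeline is class-conditional: LabelConditioner turns integer ImageNet labels into embeddings, so prompt takes a class id rather than text. A usage sketch, assuming pipe was loaded as in the model_index.json example above:

    result = pipe(
        prompt=207,               # ImageNet class id (207 = golden retriever)
        height=256, width=256,    # snapped to multiples of patch_size=16
        num_inference_steps=25,
        guidance_scale=4.0,
        seed=0,
    )
    result.images[0].save("sample.png")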
PixNerd-XL-16-256/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_class_name": "PixNerdFlowMatchScheduler",
+   "_diffusers_version": "0.36.0",
+   "guidance_interval_max": 1.0,
+   "guidance_interval_min": 0.0,
+   "guidance_scale": 3.5,
+   "last_step": null,
+   "num_inference_steps": 100,
+   "num_train_timesteps": 1000,
+   "order": 2,
+   "timeshift": 3.0
+ }
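The timeshift entry controls how the scheduler below warps its uniform step grid via _shift_respace_fn: u / (u + (1 - u) * shift). With shift 3.0 the grid is compressed toward t = 0, spending more solver steps near the noise end of the flow. A few values, as a sketch:

    # Effect of timeshift=3.0 on the respaced grid.
    shift = 3.0
    for u in (0.25, 0.5, 0.75):
        print(u, "->", u / (u + (1 - u) * shift))
    # 0.25 -> 0.1, 0.5 -> 0.25, 0.75 -> 0.5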
PixNerd-XL-16-256/scheduling_pixnerd_flow_match.py ADDED
@@ -0,0 +1,231 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ import torch
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
+ from diffusers.utils import BaseOutput
+
+ @dataclass
+ class PixNerdSchedulerOutput(BaseOutput):
+     prev_sample: torch.Tensor
+
+
+ class PixNerdFlowMatchScheduler(SchedulerMixin, ConfigMixin):
+     """
+     Diffusers-compatible scheduler wrapper for PixNerd's AdamLM flow-matching sampler.
+     """
+
+     config_name = "scheduler_config.json"
+     order = 1
+     init_noise_sigma = 1.0
+
+     @staticmethod
+     def _lagrange_coeffs(order: int, pre_ts: torch.Tensor, t_start: torch.Tensor, t_end: torch.Tensor) -> List[float]:
+         ts = [float(v) for v in pre_ts[-order:].tolist()]
+         a = float(t_start)
+         b = float(t_end)
+
+         if order == 1:
+             return [1.0]
+         if order == 2:
+             t1, t2 = ts
+             int1 = 0.5 / (t1 - t2) * ((b - t2) ** 2 - (a - t2) ** 2)
+             int2 = 0.5 / (t2 - t1) * ((b - t1) ** 2 - (a - t1) ** 2)
+             total = int1 + int2
+             return [int1 / total, int2 / total]
+         if order == 3:
+             t1, t2, t3 = ts
+             int1_denom = (t1 - t2) * (t1 - t3)
+             int1 = ((1 / 3) * b**3 - 0.5 * (t2 + t3) * b**2 + (t2 * t3) * b) - (
+                 (1 / 3) * a**3 - 0.5 * (t2 + t3) * a**2 + (t2 * t3) * a
+             )
+             int1 = int1 / int1_denom
+             int2_denom = (t2 - t1) * (t2 - t3)
+             int2 = ((1 / 3) * b**3 - 0.5 * (t1 + t3) * b**2 + (t1 * t3) * b) - (
+                 (1 / 3) * a**3 - 0.5 * (t1 + t3) * a**2 + (t1 * t3) * a
+             )
+             int2 = int2 / int2_denom
+             int3_denom = (t3 - t1) * (t3 - t2)
+             int3 = ((1 / 3) * b**3 - 0.5 * (t1 + t2) * b**2 + (t1 * t2) * b) - (
+                 (1 / 3) * a**3 - 0.5 * (t1 + t2) * a**2 + (t1 * t2) * a
+             )
+             int3 = int3 / int3_denom
+             total = int1 + int2 + int3
+             return [int1 / total, int2 / total, int3 / total]
+         if order == 4:
+             t1, t2, t3, t4 = ts
+             int1_denom = (t1 - t2) * (t1 - t3) * (t1 - t4)
+             int1 = ((1 / 4) * b**4 - (1 / 3) * (t2 + t3 + t4) * b**3 + 0.5 * (t3 * t4 + t2 * t3 + t2 * t4) * b**2 - (t2 * t3 * t4) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t2 + t3 + t4) * a**3 + 0.5 * (t3 * t4 + t2 * t3 + t2 * t4) * a**2 - (t2 * t3 * t4) * a
+             )
+             int1 = int1 / int1_denom
+             int2_denom = (t2 - t1) * (t2 - t3) * (t2 - t4)
+             int2 = ((1 / 4) * b**4 - (1 / 3) * (t1 + t3 + t4) * b**3 + 0.5 * (t3 * t4 + t1 * t3 + t1 * t4) * b**2 - (t1 * t3 * t4) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t1 + t3 + t4) * a**3 + 0.5 * (t3 * t4 + t1 * t3 + t1 * t4) * a**2 - (t1 * t3 * t4) * a
+             )
+             int2 = int2 / int2_denom
+             int3_denom = (t3 - t1) * (t3 - t2) * (t3 - t4)
+             int3 = ((1 / 4) * b**4 - (1 / 3) * (t1 + t2 + t4) * b**3 + 0.5 * (t4 * t2 + t1 * t2 + t1 * t4) * b**2 - (t1 * t2 * t4) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t1 + t2 + t4) * a**3 + 0.5 * (t4 * t2 + t1 * t2 + t1 * t4) * a**2 - (t1 * t2 * t4) * a
+             )
+             int3 = int3 / int3_denom
+             int4_denom = (t4 - t1) * (t4 - t2) * (t4 - t3)
+             int4 = ((1 / 4) * b**4 - (1 / 3) * (t1 + t2 + t3) * b**3 + 0.5 * (t3 * t2 + t1 * t2 + t1 * t3) * b**2 - (t1 * t2 * t3) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t1 + t2 + t3) * a**3 + 0.5 * (t3 * t2 + t1 * t2 + t1 * t3) * a**2 - (t1 * t2 * t3) * a
+             )
+             int4 = int4 / int4_denom
+             total = int1 + int2 + int3 + int4
+             return [int1 / total, int2 / total, int3 / total, int4 / total]
+         raise ValueError(f"Unsupported solver order: {order}.")
+
+     @register_to_config
+     def __init__(
+         self,
+         num_train_timesteps: int = 1000,
+         num_inference_steps: int = 25,
+         guidance_scale: float = 4.0,
+         timeshift: float = 3.0,
+         order: int = 2,
+         guidance_interval_min: float = 0.0,
+         guidance_interval_max: float = 1.0,
+         last_step: Optional[float] = None,
+     ) -> None:
+         self.num_inference_steps = int(num_inference_steps)
+         self.guidance_scale = float(guidance_scale)
+         self.timeshift = float(timeshift)
+         self.order = int(order)
+         self.guidance_interval_min = float(guidance_interval_min)
+         self.guidance_interval_max = float(guidance_interval_max)
+         self.last_step = last_step
+         self._reset_state()
+
+     @classmethod
+     def from_sampler_spec(cls, sampler_spec: Dict[str, Any]) -> "PixNerdFlowMatchScheduler":
+         init_args = dict(sampler_spec.get("init_args", {}))
+         return cls(
+             num_inference_steps=int(init_args.get("num_steps", 25)),
+             guidance_scale=float(init_args.get("guidance", 4.0)),
+             timeshift=float(init_args.get("timeshift", 3.0)),
+             order=int(init_args.get("order", 2)),
+             guidance_interval_min=float(init_args.get("guidance_interval_min", 0.0)),
+             guidance_interval_max=float(init_args.get("guidance_interval_max", 1.0)),
+             last_step=init_args.get("last_step"),
+         )
+
+     def _reset_state(self) -> None:
+         self.timesteps: Optional[torch.Tensor] = None
+         self._timedeltas: Optional[torch.Tensor] = None
+         self._solver_coeffs = None
+         self._model_outputs = []
+         self._step_index = 0
+
+     @staticmethod
+     def _shift_respace_fn(t: torch.Tensor, shift: float = 3.0) -> torch.Tensor:
+         return t / (t + (1 - t) * shift)
+
+     def _build_solver_state(
+         self,
+         num_inference_steps: int,
+         timeshift: float,
+         device: Optional[Union[str, torch.device]] = None,
+     ) -> Tuple[torch.Tensor, torch.Tensor, List[List[float]]]:
+         last_step = self.last_step
+         if last_step is None:
+             last_step = 1.0 / float(num_inference_steps)
+
+         endpoints = torch.linspace(0.0, 1 - float(last_step), int(num_inference_steps), dtype=torch.float32)
+         endpoints = torch.cat([endpoints, torch.tensor([1.0], dtype=torch.float32)], dim=0)
+         timesteps = self._shift_respace_fn(endpoints, timeshift).to(device=device)
+         timedeltas = (timesteps[1:] - timesteps[:-1]).to(device=device)
+
+         solver_coeffs: List[List[float]] = [[] for _ in range(int(num_inference_steps))]
+         for i in range(int(num_inference_steps)):
+             order = min(self.order, i + 1)
+             pre_ts = timesteps[: i + 1]
+             coeffs = self._lagrange_coeffs(order, pre_ts, pre_ts[i], timesteps[i + 1])
+             solver_coeffs[i] = coeffs
+         return timesteps[:-1], timedeltas, solver_coeffs
+
+     def set_timesteps(
+         self,
+         num_inference_steps: Optional[int] = None,
+         device: Optional[Union[str, torch.device]] = None,
+         timeshift: Optional[float] = None,
+         guidance_scale: Optional[float] = None,
+         order: Optional[int] = None,
+         **kwargs: Any,
+     ) -> None:
+         if num_inference_steps is not None:
+             self.num_inference_steps = int(num_inference_steps)
+         if timeshift is not None:
+             self.timeshift = float(timeshift)
+         if guidance_scale is not None:
+             self.guidance_scale = float(guidance_scale)
+         if order is not None:
+             self.order = int(order)
+
+         timesteps, timedeltas, solver_coeffs = self._build_solver_state(
+             self.num_inference_steps,
+             self.timeshift,
+             device=device,
+         )
+         self.timesteps = timesteps
+         self._timedeltas = timedeltas
+         self._solver_coeffs = solver_coeffs
+         self._model_outputs = []
+         self._step_index = 0
+
+     def scale_model_input(self, sample: torch.Tensor, timestep: Optional[torch.Tensor] = None) -> torch.Tensor:
+         return sample
+
+     def classifier_free_guidance(self, model_output: torch.Tensor) -> torch.Tensor:
+         if model_output.shape[0] % 2 != 0:
+             raise ValueError("Classifier-free guidance expects concatenated unconditional/conditional batches.")
+         uncond, cond = model_output.chunk(2, dim=0)
+         return uncond + self.guidance_scale * (cond - uncond)
+
+     def step(
+         self,
+         model_output: torch.Tensor,
+         timestep: Union[torch.Tensor, float, int],
+         sample: torch.Tensor,
+         return_dict: bool = True,
+         **kwargs: Any,
+     ) -> Union[PixNerdSchedulerOutput, Tuple[torch.Tensor]]:
+         if self.timesteps is None or self._timedeltas is None or self._solver_coeffs is None:
+             raise RuntimeError("`set_timesteps` must be called before `step`.")
+         if self._step_index >= len(self._solver_coeffs):
+             raise RuntimeError("Scheduler step index exceeded configured timesteps.")
+
+         coeffs = self._solver_coeffs[self._step_index]
+         self._model_outputs.append(model_output)
+         order = len(coeffs)
+         pred = torch.zeros_like(model_output)
+         recent = self._model_outputs[-order:]
+         for coeff, output in zip(coeffs, recent):
+             pred = pred + coeff * output
+
+         prev_sample = sample + pred * self._timedeltas[self._step_index]
+         self._step_index += 1
+
+         if not return_dict:
+             return (prev_sample,)
+         return PixNerdSchedulerOutput(prev_sample=prev_sample)
+
+     def add_noise(
+         self,
+         original_samples: torch.Tensor,
+         noise: torch.Tensor,
+         timesteps: torch.Tensor,
+     ) -> torch.Tensor:
+         alpha = timesteps.view(-1, 1, 1, 1)
+         sigma = (1.0 - timesteps).view(-1, 1, 1, 1)
+         return alpha * original_samples + sigma * noise
+
+ __all__ = [
+     "PixNerdFlowMatchScheduler",
+     "PixNerdSchedulerOutput",
+ ]
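The step method is an Adams-style multistep update: it integrates the Lagrange interpolant of the most recent model outputs over [t_i, t_i+1], and _lagrange_coeffs returns those basis integrals normalized to sum to one (the time delta is applied separately in step). A quick check with illustrative times:

    import torch

    pre_ts = torch.tensor([0.0, 0.1])   # illustrative previous solver times
    coeffs = PixNerdFlowMatchScheduler._lagrange_coeffs(
        2, pre_ts, pre_ts[1], torch.tensor(0.25)
    )
    print(coeffs, sum(coeffs))  # [-0.75, 1.75] 1.0 -- extrapolates toward the newest output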
PixNerd-XL-16-256/transformer/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_class_name": "PixNerdTransformer2DModel",
+   "_diffusers_version": "0.36.0",
+   "compile_denoiser": false,
+   "conditioner_spec": {
+     "class_path": "diffusers_modules.local.modeling_pixnerd_transformer_2d.LabelConditioner",
+     "init_args": {
+       "num_classes": 1000
+     }
+   },
+   "denoiser_spec": {
+     "class_path": "diffusers_modules.local.modeling_pixnerd_transformer_2d.PixNerDiT",
+     "init_args": {
+       "hidden_size": 1152,
+       "hidden_size_x": 64,
+       "in_channels": 3,
+       "nerf_mlpratio": 2,
+       "num_blocks": 30,
+       "num_classes": 1000,
+       "num_cond_blocks": 26,
+       "num_groups": 16,
+       "patch_size": 16
+     }
+   },
+   "diffusion_trainer_spec": null,
+   "ema_decay": 0.9999,
+   "use_ema": true,
+   "vae_spec": {
+     "class_path": "diffusers_modules.local.modeling_pixnerd_transformer_2d.PixelAE",
+     "init_args": {
+       "scale": 1.0
+     }
+   }
+ }
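This config is exactly what PixNerdTransformer2DModel's @register_to_config constructor receives; instantiate_from_spec resolves each class_path and calls it with its init_args. Spelled out by hand, construction is equivalent to the following sketch:

    denoiser = PixNerDiT(
        hidden_size=1152, hidden_size_x=64, in_channels=3, nerf_mlpratio=2,
        num_blocks=30, num_classes=1000, num_cond_blocks=26, num_groups=16,
        patch_size=16,
    )
    conditioner = LabelConditioner(num_classes=1000)
    vae = PixelAE(scale=1.0)  # identity autoencoder: PixNerd works in pixel space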
PixNerd-XL-16-256/transformer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4c07eb4a85cf3b6f4272f526902a35323436821e3060ab162b5bd104c443362
+ size 5604788640
PixNerd-XL-16-512/conversion_metadata.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "architecture": {
+     "hidden_size": 1152,
+     "hidden_size_x": 64,
+     "in_channels": 3,
+     "nerf_mlpratio": 2,
+     "num_blocks": 30,
+     "num_classes": 1000,
+     "num_cond_blocks": 26,
+     "num_groups": 16,
+     "patch_size": 16
+   },
+   "checkpoint": "D:\\sakura-project\\PixNerd-diffusers\\raw\\imagenet512\\res512_ft200k_epoch%3D325-step%3D1800000_emainit.ckpt",
+   "source_prefix": "ema_denoiser."
+ }
PixNerd-XL-16-512/model_index.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_class_name": "PixNerdPipeline",
+   "_diffusers_version": "0.30.0",
+   "scheduler": [
+     "diffusers_modules.local.scheduling_pixnerd_flow_match",
+     "PixNerdFlowMatchScheduler"
+   ],
+   "transformer": [
+     "diffusers_modules.local.modeling_pixnerd_transformer_2d",
+     "PixNerdTransformer2DModel"
+   ]
+ }
PixNerd-XL-16-512/modeling_pixnerd_transformer_2d.py ADDED
@@ -0,0 +1,746 @@
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ import importlib
5
+ import math
6
+ from dataclasses import dataclass
7
+ from functools import lru_cache
8
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
13
+ from diffusers.models.modeling_utils import ModelMixin
14
+ from diffusers.utils import BaseOutput
15
+ from torch.nn.functional import scaled_dot_product_attention
16
+
17
+ class BaseAE(torch.nn.Module):
18
+ def __init__(self, scale=1.0, shift=0.0):
19
+ super().__init__()
20
+ self.scale = scale
21
+ self.shift = shift
22
+
23
+ def encode(self, x):
24
+ return self._impl_encode(x) #.to(torch.bfloat16)
25
+
26
+ # @torch.autocast("cuda", dtype=torch.bfloat16)
27
+ def decode(self, x):
28
+ return self._impl_decode(x) #.to(torch.bfloat16)
29
+
30
+ def _impl_encode(self, x):
31
+ raise NotImplementedError
32
+
33
+ def _impl_decode(self, x):
34
+ raise NotImplementedError
35
+
36
+ def uint82fp(x):
37
+ x = x.to(torch.float32)
38
+ x = (x - 127.5) / 127.5
39
+ return x
40
+
41
+ def fp2uint8(x):
42
+ x = torch.clip_((x + 1) * 127.5 + 0.5, 0, 255).to(torch.uint8)
43
+ return x
44
+
45
+
46
+ class PixelAE(BaseAE):
47
+ def __init__(self, scale=1.0, shift=0.0):
48
+ super().__init__(scale, shift)
49
+
50
+ def _impl_encode(self, x):
51
+ return x/self.scale+self.shift
52
+
53
+ def _impl_decode(self, x):
54
+ return (x-self.shift)*self.scale
55
+
56
+
57
+ def resolve_conditioner_device(metadata: dict, fallback: torch.device | None = None) -> torch.device:
58
+ if metadata is None:
59
+ metadata = {}
60
+ if "device" in metadata and metadata["device"] is not None:
61
+ return torch.device(metadata["device"])
62
+ if fallback is not None:
63
+ return fallback
64
+ return torch.device("cuda" if torch.cuda.is_available() else "cpu")
65
+
66
+
67
+ class BaseConditioner(nn.Module):
68
+ def __init__(self):
69
+ super(BaseConditioner, self).__init__()
70
+
71
+ def _impl_condition(self, y, metadata)->torch.Tensor:
72
+ raise NotImplementedError()
73
+
74
+ def _impl_uncondition(self, y, metadata)->torch.Tensor:
75
+ raise NotImplementedError()
76
+
77
+ @torch.no_grad()
78
+ def __call__(self, y, metadata:dict={}):
79
+ condition = self._impl_condition(y, metadata)
80
+ uncondition = self._impl_uncondition(y, metadata)
81
+ if condition.dtype in [torch.float64, torch.float32, torch.float16]:
82
+ condition = condition.to(torch.bfloat16)
83
+ if uncondition.dtype in [torch.float64,torch.float32, torch.float16]:
84
+ uncondition = uncondition.to(torch.bfloat16)
85
+ return condition, uncondition
86
+
87
+
88
+ class ComposeConditioner(BaseConditioner):
89
+ def __init__(self, conditioners:List[BaseConditioner]):
90
+ super().__init__()
91
+ self.conditioners = conditioners
92
+
93
+ def _impl_condition(self, y, metadata):
94
+ condition = []
95
+ for conditioner in self.conditioners:
96
+ condition.append(conditioner._impl_condition(y, metadata))
97
+ condition = torch.cat(condition, dim=1)
98
+ return condition
99
+
100
+ def _impl_uncondition(self, y, metadata):
101
+ uncondition = []
102
+ for conditioner in self.conditioners:
103
+ uncondition.append(conditioner._impl_uncondition(y, metadata))
104
+ uncondition = torch.cat(uncondition, dim=1)
105
+ return uncondition
106
+
107
+
108
+ class LabelConditioner(BaseConditioner):
109
+ def __init__(self, num_classes):
110
+ super().__init__()
111
+ self.null_condition = num_classes
112
+
113
+ def _impl_condition(self, y, metadata):
114
+ device = resolve_conditioner_device(metadata)
115
+ return torch.tensor(y, device=device).long()
116
+
117
+ def _impl_uncondition(self, y, metadata):
118
+ device = resolve_conditioner_device(metadata)
119
+ return torch.full((len(y),), self.null_condition, dtype=torch.long, device=device)
120
+
121
+
122
+ def modulate(x, shift, scale):
123
+ return x * (1 + scale) + shift
124
+
125
+ class Embed(nn.Module):
126
+ def __init__(
127
+ self,
128
+ in_chans: int = 3,
129
+ embed_dim: int = 768,
130
+ norm_layer = None,
131
+ bias: bool = True,
132
+ ):
133
+ super().__init__()
134
+ self.in_chans = in_chans
135
+ self.embed_dim = embed_dim
136
+ self.proj = nn.Linear(in_chans, embed_dim, bias=bias)
137
+ self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
138
+ def forward(self, x):
139
+ x = self.proj(x)
140
+ x = self.norm(x)
141
+ return x
142
+
143
+ class TimestepEmbedder(nn.Module):
144
+
145
+ def __init__(self, hidden_size, frequency_embedding_size=256):
146
+ super().__init__()
147
+ self.mlp = nn.Sequential(
148
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
149
+ nn.SiLU(),
150
+ nn.Linear(hidden_size, hidden_size, bias=True),
151
+ )
152
+ self.frequency_embedding_size = frequency_embedding_size
153
+
154
+     @staticmethod
+     def timestep_embedding(t, dim, max_period=10):
+         half = dim // 2
+         freqs = torch.exp(
+             -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half
+         )
+         args = t[..., None].float() * freqs[None, ...]
+         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+         if dim % 2:
+             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+         return embedding
+
+     def forward(self, t):
+         t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+         t_emb = self.mlp(t_freq)
+         return t_emb
+
+
+ class LabelEmbedder(nn.Module):
+     def __init__(self, num_classes, hidden_size):
+         super().__init__()
+         self.embedding_table = nn.Embedding(num_classes, hidden_size)
+         self.num_classes = num_classes
+
+     def forward(self, labels):
+         embeddings = self.embedding_table(labels)
+         return embeddings
+
+
+ class FinalLayer(nn.Module):
+     def __init__(self, hidden_size, out_channels):
+         super().__init__()
+         self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+         self.linear = nn.Linear(hidden_size, out_channels, bias=True)
+         self.adaLN_modulation = nn.Sequential(
+             nn.Linear(hidden_size, 2 * hidden_size, bias=True)
+         )
+
+     def forward(self, x, c):
+         shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
+         x = modulate(self.norm_final(x), shift, scale)
+         x = self.linear(x)
+         return x
+
+
+ class RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         LlamaRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
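+ # SwiGLU-style gated MLP (as in LLaMA): the hidden width is scaled by 2/3 so
+ # the gated variant keeps roughly the parameter count of a plain MLP.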
+ class FeedForward(nn.Module):
+     def __init__(
+         self,
+         dim: int,
+         hidden_dim: int,
+     ):
+         super().__init__()
+         hidden_dim = int(2 * hidden_dim / 3)
+         self.w1 = nn.Linear(dim, hidden_dim, bias=False)
+         self.w3 = nn.Linear(dim, hidden_dim, bias=False)
+         self.w2 = nn.Linear(hidden_dim, dim, bias=False)
+
+     def forward(self, x):
+         x = self.w2(torch.nn.functional.silu(self.w1(x)) * self.w3(x))
+         return x
+
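+ # 2D axial RoPE: separate rotary frequencies for the x and y patch coordinates,
+ # stored as complex phases and interleaved per head channel, so attention sees
+ # relative 2D positions.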
+ def precompute_freqs_cis_2d(dim: int, height: int, width: int, theta: float = 10000.0, scale=16.0):
+     x_pos = torch.linspace(0, scale, width)
+     y_pos = torch.linspace(0, scale, height)
+     y_pos, x_pos = torch.meshgrid(y_pos, x_pos, indexing="ij")
+     y_pos = y_pos.reshape(-1)
+     x_pos = x_pos.reshape(-1)
+     freqs = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))  # Hc/4
+     x_freqs = torch.outer(x_pos, freqs).float()  # N Hc/4
+     y_freqs = torch.outer(y_pos, freqs).float()  # N Hc/4
+     x_cis = torch.polar(torch.ones_like(x_freqs), x_freqs)
+     y_cis = torch.polar(torch.ones_like(y_freqs), y_freqs)
+     freqs_cis = torch.cat([x_cis.unsqueeze(dim=-1), y_cis.unsqueeze(dim=-1)], dim=-1)  # N, Hc/4, 2
+     freqs_cis = freqs_cis.reshape(height * width, -1)
+     return freqs_cis
+
+
+ def apply_rotary_emb(
+     xq: torch.Tensor,
+     xk: torch.Tensor,
+     freqs_cis: torch.Tensor,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     freqs_cis = freqs_cis[None, :, None, :]
+     # xq: B N H Hc
+     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))  # B N H Hc/2
+     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
+     xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)  # B, N, H, Hc
+     xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
+     return xq_out.type_as(xq), xk_out.type_as(xk)
+
+
+ class RAttention(nn.Module):
+     def __init__(
+         self,
+         dim: int,
+         num_heads: int = 8,
+         qkv_bias: bool = False,
+         qk_norm: bool = True,
+         attn_drop: float = 0.0,
+         proj_drop: float = 0.0,
+         norm_layer: nn.Module = RMSNorm,
+     ) -> None:
+         super().__init__()
+         assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+
+         self.dim = dim
+         self.num_heads = num_heads
+         self.head_dim = dim // num_heads
+         self.scale = self.head_dim ** -0.5
+
+         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+         self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+         self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+         self.attn_drop = nn.Dropout(attn_drop)
+         self.proj = nn.Linear(dim, dim)
+         self.proj_drop = nn.Dropout(proj_drop)
+
+     def forward(self, x: torch.Tensor, pos, mask) -> torch.Tensor:
+         B, N, C = x.shape
+         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 1, 3, 4)
+         q, k, v = qkv[0], qkv[1], qkv[2]  # B N H Hc
+         q = self.q_norm(q)
+         k = self.k_norm(k)
+         q, k = apply_rotary_emb(q, k, freqs_cis=pos)
+         q = q.view(B, -1, self.num_heads, C // self.num_heads).transpose(1, 2)  # B, H, N, Hc
+         k = k.view(B, -1, self.num_heads, C // self.num_heads).transpose(1, 2).contiguous()  # B, H, N, Hc
+         v = v.view(B, -1, self.num_heads, C // self.num_heads).transpose(1, 2).contiguous()
+
+         x = scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)
+
+         x = x.transpose(1, 2).reshape(B, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+
+ class FlattenDiTBlock(nn.Module):
+     def __init__(self, hidden_size, groups, mlp_ratio=4.0):
+         super().__init__()
+         self.norm1 = RMSNorm(hidden_size, eps=1e-6)
+         self.attn = RAttention(hidden_size, num_heads=groups, qkv_bias=False)
+         self.norm2 = RMSNorm(hidden_size, eps=1e-6)
+         mlp_hidden_dim = int(hidden_size * mlp_ratio)
+         self.mlp = FeedForward(hidden_size, mlp_hidden_dim)
+         self.adaLN_modulation = nn.Sequential(
+             nn.Linear(hidden_size, 6 * hidden_size, bias=True)
+         )
+
+     def forward(self, x, c, pos, mask=None):
+         shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(6, dim=-1)
+         x = x + gate_msa * self.attn(modulate(self.norm1(x), shift_msa, scale_msa), pos, mask=mask)
+         x = x + gate_mlp * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
+         return x
+
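+ # Intra-patch positional features: a truncated 2D DCT basis over normalized
+ # pixel coordinates, concatenated to the raw channels before the linear embed,
+ # so the per-patch MLP knows where each pixel sits inside its patch.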
+ class NerfEmbedder(nn.Module):
+     def __init__(self, in_channels, hidden_size_input, max_freqs):
+         super().__init__()
+         self.max_freqs = max_freqs
+         self.hidden_size_input = hidden_size_input
+         self.embedder = nn.Sequential(
+             nn.Linear(in_channels + max_freqs ** 2, hidden_size_input, bias=True),
+         )
+
+     @lru_cache
+     def fetch_pos(self, patch_size, device, dtype):
+         pos_x = torch.linspace(0, 1, patch_size, device=device, dtype=dtype)
+         pos_y = torch.linspace(0, 1, patch_size, device=device, dtype=dtype)
+         pos_y, pos_x = torch.meshgrid(pos_y, pos_x, indexing="ij")
+         pos_x = pos_x.reshape(-1, 1, 1)
+         pos_y = pos_y.reshape(-1, 1, 1)
+
+         freqs = torch.linspace(0, self.max_freqs, self.max_freqs, dtype=dtype, device=device)
+         freqs_x = freqs[None, :, None]
+         freqs_y = freqs[None, None, :]
+         coeffs = (1 + freqs_x * freqs_y) ** -1
+         dct_x = torch.cos(pos_x * freqs_x * torch.pi)
+         dct_y = torch.cos(pos_y * freqs_y * torch.pi)
+         dct = (dct_x * dct_y * coeffs).view(1, -1, self.max_freqs ** 2)
+         return dct
+
+     def forward(self, inputs):
+         B, P2, C = inputs.shape
+         patch_size = int(P2 ** 0.5)
+         device = inputs.device
+         dtype = inputs.dtype
+         dct = self.fetch_pos(patch_size, device, dtype)
+         dct = dct.repeat(B, 1, 1)
+         inputs = torch.cat([inputs, dct], dim=-1)
+         inputs = self.embedder(inputs)
+         return inputs
+
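+ # Hypernetwork block: the patch token `s` generates the weights of a two-layer
+ # MLP that is applied to that patch's pixel features via batched matmul; the
+ # generated weight matrices are column-normalized for stability.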
+ class NerfBlock(nn.Module):
+     def __init__(self, hidden_size_s, hidden_size_x, mlp_ratio=4):
+         super().__init__()
+         self.param_generator1 = nn.Sequential(
+             nn.Linear(hidden_size_s, 2 * hidden_size_x ** 2 * mlp_ratio, bias=True),
+         )
+         self.norm = RMSNorm(hidden_size_x, eps=1e-6)
+         self.mlp_ratio = mlp_ratio
+
+     def forward(self, x, s):
+         batch_size, num_x, hidden_size_x = x.shape
+         mlp_params1 = self.param_generator1(s)
+         fc1_param1, fc2_param1 = mlp_params1.chunk(2, dim=-1)
+         fc1_param1 = fc1_param1.view(batch_size, hidden_size_x, hidden_size_x * self.mlp_ratio)
+         fc2_param1 = fc2_param1.view(batch_size, hidden_size_x * self.mlp_ratio, hidden_size_x)
+
+         # normalize fc1
+         normalized_fc1_param1 = torch.nn.functional.normalize(fc1_param1, dim=-2)
+         # normalize fc2
+         normalized_fc2_param1 = torch.nn.functional.normalize(fc2_param1, dim=-2)
+         # mlp 1
+         res_x = x
+         x = self.norm(x)
+         x = torch.bmm(x, normalized_fc1_param1)
+         x = torch.nn.functional.silu(x)
+         x = torch.bmm(x, normalized_fc2_param1)
+         x = x + res_x
+         return x
+
+
+ class NerfFinalLayer(nn.Module):
+     def __init__(self, hidden_size, out_channels):
+         super().__init__()
+         self.norm = RMSNorm(hidden_size, eps=1e-6)
+         self.linear = nn.Linear(hidden_size, out_channels, bias=True)
+
+     def forward(self, x):
+         x = self.norm(x)
+         x = self.linear(x)
+         return x
+
+
+ class PixNerDiT(nn.Module):
+     def __init__(
+         self,
+         in_channels=4,
+         num_groups=12,
+         hidden_size=1152,
+         hidden_size_x=64,
+         nerf_mlpratio=4,
+         num_blocks=18,
+         num_cond_blocks=4,
+         patch_size=2,
+         num_classes=1000,
+         learn_sigma=True,
+         deep_supervision=0,
+         weight_path=None,
+         load_ema=False,
+     ):
+         super().__init__()
+         self.deep_supervision = deep_supervision
+         self.learn_sigma = learn_sigma
+         self.in_channels = in_channels
+         self.out_channels = in_channels
+         self.hidden_size = hidden_size
+         self.num_groups = num_groups
+         self.num_blocks = num_blocks
+         self.num_cond_blocks = num_cond_blocks
+         self.patch_size = patch_size
+         self.x_embedder = NerfEmbedder(in_channels, hidden_size_x, max_freqs=8)
+         self.s_embedder = Embed(in_channels * patch_size ** 2, hidden_size, bias=True)
+         self.t_embedder = TimestepEmbedder(hidden_size)
+         self.y_embedder = LabelEmbedder(num_classes + 1, hidden_size)
+
+         self.final_layer = NerfFinalLayer(hidden_size_x, self.out_channels)
+
+         self.weight_path = weight_path
+
+         self.load_ema = load_ema
+         self.blocks = nn.ModuleList([
+             FlattenDiTBlock(self.hidden_size, self.num_groups) for _ in range(self.num_cond_blocks)
+         ])
+         self.blocks.extend([
+             NerfBlock(self.hidden_size, hidden_size_x, nerf_mlpratio) for _ in range(self.num_cond_blocks, self.num_blocks)
+         ])
+         self.initialize_weights()
+         self.precompute_pos = dict()
+
+     def fetch_pos(self, height, width, device):
+         if (height, width) in self.precompute_pos:
+             return self.precompute_pos[(height, width)].to(device)
+         else:
+             pos = precompute_freqs_cis_2d(self.hidden_size // self.num_groups, height, width).to(device)
+             self.precompute_pos[(height, width)] = pos
+             return pos
+
+     def initialize_weights(self):
+         # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+         w = self.s_embedder.proj.weight.data
+         nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+         nn.init.constant_(self.s_embedder.proj.bias, 0)
+
+         # Initialize label embedding table:
+         nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)
+
+         # Initialize timestep embedding MLP:
+         nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+         nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+         # Zero-init final layer:
+         nn.init.zeros_(self.final_layer.linear.weight)
+         nn.init.zeros_(self.final_layer.linear.bias)
+
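+     # Two-stage forward pass: (1) unfold the image into patch tokens and run the
+     # global FlattenDiT blocks to produce a per-patch conditioning vector `s`;
+     # (2) run the weight-generated NerfBlocks on the pixels of each patch and
+     # fold the prediction back into image layout.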
+     def forward(self, x, t, y, s=None, mask=None):
+         B, _, H, W = x.shape
+         pos = self.fetch_pos(H // self.patch_size, W // self.patch_size, x.device)
+         x = torch.nn.functional.unfold(x, kernel_size=self.patch_size, stride=self.patch_size).transpose(1, 2)
+         t = self.t_embedder(t.view(-1)).view(B, -1, self.hidden_size)
+         y = self.y_embedder(y).view(B, 1, self.hidden_size)
+         c = nn.functional.silu(t + y)
+         if s is None:
+             s = self.s_embedder(x)
+             for i in range(self.num_cond_blocks):
+                 s = self.blocks[i](s, c, pos, mask)
+             s = nn.functional.silu(t + s)
+         batch_size, length, _ = s.shape
+         x = x.reshape(batch_size * length, self.in_channels, self.patch_size ** 2)
+         x = x.transpose(1, 2)
+         s = s.view(batch_size * length, self.hidden_size)
+         x = self.x_embedder(x)
+         for i in range(self.num_cond_blocks, self.num_blocks):
+             x = self.blocks[i](x, s)
+         x = self.final_layer(x)
+         x = x.transpose(1, 2)
+         x = x.reshape(batch_size, length, -1)
+         x = torch.nn.functional.fold(x.transpose(1, 2).contiguous(), (H, W), kernel_size=self.patch_size, stride=self.patch_size)
+         return x
+
+
+ def to_container(config: Any) -> Any:
+     if hasattr(config, "items") and not isinstance(config, dict):
+         return {k: to_container(v) for k, v in config.items()}
+     if isinstance(config, list):
+         return [to_container(v) for v in config]
+     return config
+
+
+ def load_symbol(path: str) -> Any:
+     module_path, name = path.rsplit(".", 1)
+     module = importlib.import_module(module_path)
+     return getattr(module, name)
+
+
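+ # Specs follow a LightningCLI-style layout: {"class_path": ..., "init_args": {...}}.
+ # Dotted strings are first tried as importable symbols and fall back to plain
+ # strings, so config values that name modules resolve to objects.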
+ def instantiate_from_spec(spec: Any) -> Any:
+     spec = to_container(spec)
+     if isinstance(spec, dict) and "class_path" in spec:
+         class_or_fn = load_symbol(spec["class_path"])
+         init_args = spec.get("init_args", {})
+         if isinstance(init_args, dict):
+             init_args = {k: instantiate_from_spec(v) for k, v in init_args.items()}
+         return class_or_fn(**init_args)
+     if isinstance(spec, dict):
+         return {k: instantiate_from_spec(v) for k, v in spec.items()}
+     if isinstance(spec, list):
+         return [instantiate_from_spec(v) for v in spec]
+     if isinstance(spec, str) and "." in spec:
+         try:
+             return load_symbol(spec)
+         except Exception:
+             return spec
+     return spec
+
+
+ def clone_spec(spec: Dict[str, Any]) -> Dict[str, Any]:
+     return copy.deepcopy(to_container(spec))
+
+
+ def load_prefixed_state_dict(
+     module: Optional[torch.nn.Module],
+     state_dict: Dict[str, torch.Tensor],
+     prefixes: Iterable[str],
+ ) -> bool:
+     if module is None:
+         return False
+     for prefix in prefixes:
+         subset = {
+             key[len(prefix):]: value
+             for key, value in state_dict.items()
+             if key.startswith(prefix)
+         }
+         if subset:
+             module.load_state_dict(subset, strict=False)
+             return True
+     return False
+
+
+ @dataclass
+ class PixNerdTransformer2DModelOutput(BaseOutput):
+     sample: torch.FloatTensor
+
+
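+ # Diffusers-facing wrapper: the denoiser, conditioner, and VAE are instantiated
+ # from the serialized specs stored in config.json, and an optional float32 EMA
+ # copy of the denoiser is kept for inference.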
+ class PixNerdTransformer2DModel(ModelMixin, ConfigMixin):
+     config_name = "config.json"
+
+     @register_to_config
+     def __init__(
+         self,
+         denoiser_spec: Dict[str, Any],
+         conditioner_spec: Dict[str, Any],
+         vae_spec: Optional[Dict[str, Any]] = None,
+         diffusion_trainer_spec: Optional[Dict[str, Any]] = None,
+         use_ema: bool = True,
+         ema_decay: float = 0.9999,
+         compile_denoiser: bool = False,
+     ) -> None:
+         super().__init__()
+         self.denoiser = instantiate_from_spec(to_container(denoiser_spec))
+         self.conditioner = instantiate_from_spec(to_container(conditioner_spec))
+         self.vae = instantiate_from_spec(to_container(vae_spec)) if vae_spec is not None else None
+         self.diffusion_trainer = (
+             instantiate_from_spec(to_container(diffusion_trainer_spec))
+             if diffusion_trainer_spec is not None
+             else None
+         )
+
+         self.use_ema = bool(use_ema)
+         self.ema_decay = float(ema_decay)
+         self.ema_denoiser = copy.deepcopy(self.denoiser) if self.use_ema else None
+         if self.ema_denoiser is not None:
+             self.ema_denoiser.to(torch.float32)
+
+         if compile_denoiser and hasattr(self.denoiser, "compile"):
+             self.denoiser.compile()
+             if self.ema_denoiser is not None:
+                 self.ema_denoiser.compile()
+
+         self._freeze_non_trainable_modules()
+         if self.ema_denoiser is not None:
+             self.sync_ema()
+
+     @property
+     def patch_size(self) -> int:
+         return int(getattr(self.denoiser, "patch_size", 1))
+
+     @property
+     def in_channels(self) -> int:
+         return int(getattr(self.denoiser, "in_channels", 3))
+
+     @classmethod
+     def from_project_config(
+         cls,
+         model_config: Dict[str, Any],
+         use_ema: bool = True,
+         compile_denoiser: bool = False,
+     ) -> "PixNerdTransformer2DModel":
+         model_config = to_container(model_config)
+         ema_decay = model_config.get("ema_tracker", {}).get("init_args", {}).get("decay", 0.9999)
+         return cls(
+             denoiser_spec=model_config["denoiser"],
+             conditioner_spec=model_config["conditioner"],
+             vae_spec=model_config.get("vae"),
+             diffusion_trainer_spec=model_config.get("diffusion_trainer"),
+             use_ema=use_ema,
+             ema_decay=ema_decay,
+             compile_denoiser=compile_denoiser,
+         )
+
+     @staticmethod
+     def _as_timestep_tensor(
+         timestep: Any,
+         batch_size: int,
+         device: torch.device,
+     ) -> torch.Tensor:
+         if isinstance(timestep, torch.Tensor):
+             if timestep.ndim == 0:
+                 return timestep.repeat(batch_size).to(device=device, dtype=torch.float32)
+             return timestep.to(device=device, dtype=torch.float32)
+         return torch.full((batch_size,), float(timestep), device=device, dtype=torch.float32)
+
+     def _freeze_module(self, module: Optional[torch.nn.Module]) -> None:
+         if module is None:
+             return
+         module.eval()
+         for parameter in module.parameters():
+             parameter.requires_grad = False
+
+     def _freeze_non_trainable_modules(self) -> None:
+         self._freeze_module(self.conditioner)
+         self._freeze_module(self.vae)
+         self._freeze_module(self.ema_denoiser)
+
+     def forward(
+         self,
+         sample: torch.Tensor,
+         timestep: Any,
+         encoder_hidden_states: torch.Tensor,
+         return_dict: bool = True,
+     ) -> PixNerdTransformer2DModelOutput | Tuple[torch.Tensor]:
+         t = self._as_timestep_tensor(timestep, sample.shape[0], sample.device)
+         out = self.denoiser(sample, t, encoder_hidden_states)
+         if not return_dict:
+             return (out,)
+         return PixNerdTransformer2DModelOutput(sample=out)
+
+     def predict_noise(
+         self,
+         sample: torch.Tensor,
+         timestep: Any,
+         encoder_hidden_states: torch.Tensor,
+         use_ema: bool = False,
+     ) -> torch.Tensor:
+         t = self._as_timestep_tensor(timestep, sample.shape[0], sample.device)
+         denoiser = self.get_inference_denoiser(use_ema=use_ema)
+         return denoiser(sample, t, encoder_hidden_states)
+
+     def get_inference_denoiser(self, use_ema: bool = True) -> torch.nn.Module:
+         if use_ema and self.ema_denoiser is not None:
+             return self.ema_denoiser
+         return self.denoiser
+
+     @torch.no_grad()
+     def get_conditioning(
+         self,
+         y: Iterable[Any],
+         metadata: Optional[Dict[str, Any]] = None,
+     ):
+         metadata = {} if metadata is None else metadata
+         return self.conditioner(y, metadata)
+
+     @torch.no_grad()
+     def encode(self, x: torch.Tensor) -> torch.Tensor:
+         if self.vae is None:
+             return x
+         return self.vae.encode(x)
+
+     @torch.no_grad()
+     def decode(self, latents: torch.Tensor) -> torch.Tensor:
+         if self.vae is None:
+             return latents
+         return self.vae.decode(latents)
+
+     @torch.no_grad()
+     def sync_ema(self) -> None:
+         if self.ema_denoiser is None:
+             return
+         self.ema_denoiser.load_state_dict(self.denoiser.state_dict(), strict=True)
+         self.ema_denoiser.to(torch.float32)
+
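+     # Standard exponential moving average of the online weights:
+     # ema <- decay * ema + (1 - decay) * param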
+     @torch.no_grad()
+     def ema_step(self, decay: Optional[float] = None) -> None:
+         if self.ema_denoiser is None:
+             return
+         decay = self.ema_decay if decay is None else float(decay)
+         for ema_param, param in zip(self.ema_denoiser.parameters(), self.denoiser.parameters()):
+             ema_param.mul_(decay).add_(param.detach().float(), alpha=1.0 - decay)
+
+     def compute_training_loss(
+         self,
+         x: torch.Tensor,
+         y: Iterable[Any],
+         scheduler: torch.nn.Module,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, torch.Tensor]:
+         if self.diffusion_trainer is None:
+             raise RuntimeError("diffusion_trainer is not configured.")
+         metadata = {} if metadata is None else metadata
+
+         with torch.no_grad():
+             x = self.encode(x)
+             condition, uncondition = self.get_conditioning(y, metadata)
+
+         return self.diffusion_trainer(
+             self.denoiser,
+             self.ema_denoiser if self.ema_denoiser is not None else self.denoiser,
+             scheduler,
+             x,
+             condition,
+             uncondition,
+             metadata,
+         )
+
+
+ __all__ = [
+     "PixNerDiT",
+     "LabelConditioner",
+     "PixelAE",
+     "PixNerdTransformer2DModel",
+     "PixNerdTransformer2DModelOutput",
+ ]
PixNerd-XL-16-512/pipeline.py ADDED
@@ -0,0 +1,184 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import List, Optional, Sequence, Union
+
+ import numpy as np
+ import torch
+ from diffusers import DiffusionPipeline
+ from diffusers.image_processor import VaeImageProcessor
+ from diffusers.utils import BaseOutput
+ from PIL import Image
+
+ from .modeling_pixnerd_transformer_2d import PixNerdTransformer2DModel
+ from .scheduling_pixnerd_flow_match import PixNerdFlowMatchScheduler
+
+ ConditioningInput = Union[str, int, Sequence[Union[str, int]]]
+
+
+ @dataclass
+ class PixNerdPipelineOutput(BaseOutput):
+     images: Union[List[Image.Image], torch.Tensor, np.ndarray]
+
+
+ class PixNerdPipeline(DiffusionPipeline):
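+     """
+     Class-conditional pixel-space generation with PixNerd.
+
+     Minimal usage sketch (the local path is a placeholder for wherever this
+     model folder was downloaded; class id 207 is just an example ImageNet label):
+
+         from diffusers import DiffusionPipeline
+
+         pipe = DiffusionPipeline.from_pretrained(
+             "./PixNerd-XL-16-512", trust_remote_code=True
+         ).to("cuda")
+         image = pipe(prompt=207, num_inference_steps=25, guidance_scale=4.0).images[0]
+     """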
+     model_cpu_offload_seq = "conditioner->transformer->vae"
+     _callback_tensor_inputs = ["latents"]
+
+     def __init__(
+         self,
+         transformer,
+         scheduler: PixNerdFlowMatchScheduler,
+         vae=None,
+         conditioner=None,
+     ):
+         super().__init__()
+         if vae is None:
+             vae = getattr(transformer, "vae", None)
+         if conditioner is None:
+             conditioner = getattr(transformer, "conditioner", None)
+         if vae is None or conditioner is None:
+             raise ValueError("Pipeline requires `vae` and `conditioner` either explicitly or from `transformer`.")
+         self.register_modules(
+             vae=vae,
+             conditioner=conditioner,
+             transformer=transformer,
+             scheduler=scheduler,
+         )
+         self.image_processor = VaeImageProcessor(vae_scale_factor=1)
+
+     @staticmethod
+     def _fp_to_uint8(image: torch.Tensor) -> torch.Tensor:
+         return ((image + 1) * 127.5 + 0.5).clamp_(0, 255).to(torch.uint8)
+
+     @staticmethod
+     def _to_list(y: ConditioningInput) -> List[Union[str, int]]:
+         if isinstance(y, (str, int)):
+             return [y]
+         return list(y)
+
+     @staticmethod
+     def _repeat(values: List[Union[str, int]], repeats: int) -> List[Union[str, int]]:
+         if repeats == 1:
+             return values
+         expanded: List[Union[str, int]] = []
+         for value in values:
+             expanded.extend([value] * repeats)
+         return expanded
+
+     def encode_prompt(
+         self,
+         prompt: ConditioningInput,
+         num_images_per_prompt: int,
+     ):
+         prompts = self._repeat(self._to_list(prompt), num_images_per_prompt)
+         metadata = {"device": self._execution_device}
+         with torch.no_grad():
+             cond, uncond = self.conditioner(prompts, metadata)
+         return cond, uncond, prompts
+
+     def prepare_latents(
+         self,
+         batch_size: int,
+         num_channels: int,
+         height: int,
+         width: int,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         if latents is not None:
+             return latents.to(device=self._execution_device, dtype=torch.float32)
+         return torch.randn(
+             (batch_size, num_channels, height, width),
+             generator=generator,
+             device=self._execution_device,
+             dtype=torch.float32,
+         )
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         prompt: ConditioningInput,
+         negative_prompt: Optional[ConditioningInput] = None,
+         num_images_per_prompt: int = 1,
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 25,
+         guidance_scale: float = 4.0,
+         generator: Optional[torch.Generator] = None,
+         seed: Optional[int] = None,
+         latents: Optional[torch.Tensor] = None,
+         output_type: str = "pil",
+         return_dict: bool = True,
+         timeshift: float = 3.0,
+         order: int = 2,
+     ) -> PixNerdPipelineOutput | tuple:
+         patch_size = int(getattr(self.transformer, "patch_size", 1))
+         channels = int(getattr(self.transformer, "in_channels", 3))
+         height = (height // patch_size) * patch_size
+         width = (width // patch_size) * patch_size
+
+         if hasattr(self.transformer, "decoder_patch_scaling_h"):
+             self.transformer.decoder_patch_scaling_h = height / 512
+             self.transformer.decoder_patch_scaling_w = width / 512
+
+         cond, default_uncond, prompts = self.encode_prompt(prompt, num_images_per_prompt)
+         if negative_prompt is not None:
+             negative = self._repeat(self._to_list(negative_prompt), num_images_per_prompt)
+             metadata = {"device": self._execution_device}
+             with torch.no_grad():
+                 _, uncond = self.conditioner(negative, metadata)
+         else:
+             uncond = default_uncond
+         batch_size = len(prompts)
+         if generator is None and seed is not None:
+             generator = torch.Generator(device=self._execution_device).manual_seed(seed)
+         latents = self.prepare_latents(
+             batch_size=batch_size,
+             num_channels=channels,
+             height=height,
+             width=width,
+             generator=generator,
+             latents=latents,
+         )
+         self.scheduler.set_timesteps(
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             timeshift=timeshift,
+             order=order,
+             device=latents.device,
+         )
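+         # Classifier-free guidance: evaluate the unconditional and conditional
+         # branches in a single doubled batch, then combine them in the scheduler.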
+         for timestep in self.scheduler.timesteps:
+             cfg_latents = torch.cat([latents, latents], dim=0)
+             cfg_t = timestep.repeat(cfg_latents.shape[0]).to(latents.device, dtype=latents.dtype)
+             cfg_condition = torch.cat([uncond, cond], dim=0)
+             model_output = self.transformer(
+                 sample=cfg_latents,
+                 timestep=cfg_t,
+                 encoder_hidden_states=cfg_condition,
+             ).sample
+             model_output = self.scheduler.classifier_free_guidance(model_output)
+             latents = self.scheduler.step(
+                 model_output=model_output,
+                 timestep=timestep,
+                 sample=latents,
+             ).prev_sample
+
+         image = self.vae.decode(latents)
+         images_uint8 = self._fp_to_uint8(image).permute(0, 2, 3, 1).cpu().numpy()
+         if output_type == "pil":
+             output = [Image.fromarray(img) for img in images_uint8]
+         elif output_type == "pt":
+             output = torch.from_numpy(images_uint8)
+         elif output_type == "np":
+             output = images_uint8
+         else:
+             raise ValueError(f"Unsupported output_type: {output_type}")
+
+         if not return_dict:
+             return (output,)
+         return PixNerdPipelineOutput(images=output)
+
+
+ __all__ = [
+     "PixNerdPipeline",
+     "PixNerdPipelineOutput",
+ ]
PixNerd-XL-16-512/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_class_name": "PixNerdFlowMatchScheduler",
+   "_diffusers_version": "0.36.0",
+   "guidance_interval_max": 1.0,
+   "guidance_interval_min": 0.0,
+   "guidance_scale": 3.5,
+   "last_step": null,
+   "num_inference_steps": 100,
+   "num_train_timesteps": 1000,
+   "order": 2,
+   "timeshift": 3.0
+ }
PixNerd-XL-16-512/scheduling_pixnerd_flow_match.py ADDED
@@ -0,0 +1,231 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ import torch
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
+ from diffusers.utils import BaseOutput
+
+
+ @dataclass
+ class PixNerdSchedulerOutput(BaseOutput):
+     prev_sample: torch.Tensor
+
+
+ class PixNerdFlowMatchScheduler(SchedulerMixin, ConfigMixin):
+     """
+     Diffusers-compatible scheduler wrapper for PixNerd's AdamLM (Adams-type
+     linear multistep) flow-matching sampler.
+     """
+
+     config_name = "scheduler_config.json"
+     order = 1
+     init_noise_sigma = 1.0
+
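+     # Adams-Bashforth-style weights: fit a Lagrange polynomial through the last
+     # `order` velocity predictions at times `pre_ts`, integrate it exactly over
+     # [t_start, t_end], and normalize the weights to sum to 1 (the step size is
+     # applied separately in `step`).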
+     @staticmethod
+     def _lagrange_coeffs(order: int, pre_ts: torch.Tensor, t_start: torch.Tensor, t_end: torch.Tensor) -> List[float]:
+         ts = [float(v) for v in pre_ts[-order:].tolist()]
+         a = float(t_start)
+         b = float(t_end)
+
+         if order == 1:
+             return [1.0]
+         if order == 2:
+             t1, t2 = ts
+             int1 = 0.5 / (t1 - t2) * ((b - t2) ** 2 - (a - t2) ** 2)
+             int2 = 0.5 / (t2 - t1) * ((b - t1) ** 2 - (a - t1) ** 2)
+             total = int1 + int2
+             return [int1 / total, int2 / total]
+         if order == 3:
+             t1, t2, t3 = ts
+             int1_denom = (t1 - t2) * (t1 - t3)
+             int1 = ((1 / 3) * b**3 - 0.5 * (t2 + t3) * b**2 + (t2 * t3) * b) - (
+                 (1 / 3) * a**3 - 0.5 * (t2 + t3) * a**2 + (t2 * t3) * a
+             )
+             int1 = int1 / int1_denom
+             int2_denom = (t2 - t1) * (t2 - t3)
+             int2 = ((1 / 3) * b**3 - 0.5 * (t1 + t3) * b**2 + (t1 * t3) * b) - (
+                 (1 / 3) * a**3 - 0.5 * (t1 + t3) * a**2 + (t1 * t3) * a
+             )
+             int2 = int2 / int2_denom
+             int3_denom = (t3 - t1) * (t3 - t2)
+             int3 = ((1 / 3) * b**3 - 0.5 * (t1 + t2) * b**2 + (t1 * t2) * b) - (
+                 (1 / 3) * a**3 - 0.5 * (t1 + t2) * a**2 + (t1 * t2) * a
+             )
+             int3 = int3 / int3_denom
+             total = int1 + int2 + int3
+             return [int1 / total, int2 / total, int3 / total]
+         if order == 4:
+             t1, t2, t3, t4 = ts
+             int1_denom = (t1 - t2) * (t1 - t3) * (t1 - t4)
+             int1 = ((1 / 4) * b**4 - (1 / 3) * (t2 + t3 + t4) * b**3 + 0.5 * (t3 * t4 + t2 * t3 + t2 * t4) * b**2 - (t2 * t3 * t4) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t2 + t3 + t4) * a**3 + 0.5 * (t3 * t4 + t2 * t3 + t2 * t4) * a**2 - (t2 * t3 * t4) * a
+             )
+             int1 = int1 / int1_denom
+             int2_denom = (t2 - t1) * (t2 - t3) * (t2 - t4)
+             int2 = ((1 / 4) * b**4 - (1 / 3) * (t1 + t3 + t4) * b**3 + 0.5 * (t3 * t4 + t1 * t3 + t1 * t4) * b**2 - (t1 * t3 * t4) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t1 + t3 + t4) * a**3 + 0.5 * (t3 * t4 + t1 * t3 + t1 * t4) * a**2 - (t1 * t3 * t4) * a
+             )
+             int2 = int2 / int2_denom
+             int3_denom = (t3 - t1) * (t3 - t2) * (t3 - t4)
+             int3 = ((1 / 4) * b**4 - (1 / 3) * (t1 + t2 + t4) * b**3 + 0.5 * (t4 * t2 + t1 * t2 + t1 * t4) * b**2 - (t1 * t2 * t4) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t1 + t2 + t4) * a**3 + 0.5 * (t4 * t2 + t1 * t2 + t1 * t4) * a**2 - (t1 * t2 * t4) * a
+             )
+             int3 = int3 / int3_denom
+             int4_denom = (t4 - t1) * (t4 - t2) * (t4 - t3)
+             int4 = ((1 / 4) * b**4 - (1 / 3) * (t1 + t2 + t3) * b**3 + 0.5 * (t3 * t2 + t1 * t2 + t1 * t3) * b**2 - (t1 * t2 * t3) * b) - (
+                 (1 / 4) * a**4 - (1 / 3) * (t1 + t2 + t3) * a**3 + 0.5 * (t3 * t2 + t1 * t2 + t1 * t3) * a**2 - (t1 * t2 * t3) * a
+             )
+             int4 = int4 / int4_denom
+             total = int1 + int2 + int3 + int4
+             return [int1 / total, int2 / total, int3 / total, int4 / total]
+         raise ValueError(f"Unsupported solver order: {order}.")
+
+     @register_to_config
+     def __init__(
+         self,
+         num_train_timesteps: int = 1000,
+         num_inference_steps: int = 25,
+         guidance_scale: float = 4.0,
+         timeshift: float = 3.0,
+         order: int = 2,
+         guidance_interval_min: float = 0.0,
+         guidance_interval_max: float = 1.0,
+         last_step: Optional[float] = None,
+     ) -> None:
+         self.num_inference_steps = int(num_inference_steps)
+         self.guidance_scale = float(guidance_scale)
+         self.timeshift = float(timeshift)
+         self.order = int(order)
+         self.guidance_interval_min = float(guidance_interval_min)
+         self.guidance_interval_max = float(guidance_interval_max)
+         self.last_step = last_step
+         self._reset_state()
+
+     @classmethod
+     def from_sampler_spec(cls, sampler_spec: Dict[str, Any]) -> "PixNerdFlowMatchScheduler":
+         init_args = dict(sampler_spec.get("init_args", {}))
+         return cls(
+             num_inference_steps=int(init_args.get("num_steps", 25)),
+             guidance_scale=float(init_args.get("guidance", 4.0)),
+             timeshift=float(init_args.get("timeshift", 3.0)),
+             order=int(init_args.get("order", 2)),
+             guidance_interval_min=float(init_args.get("guidance_interval_min", 0.0)),
+             guidance_interval_max=float(init_args.get("guidance_interval_max", 1.0)),
+             last_step=init_args.get("last_step"),
+         )
+
+     def _reset_state(self) -> None:
+         self.timesteps: Optional[torch.Tensor] = None
+         self._timedeltas: Optional[torch.Tensor] = None
+         self._solver_coeffs = None
+         self._model_outputs = []
+         self._step_index = 0
+
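+     # Timeshift warp t -> t / (t + (1 - t) * shift). With shift > 1 the grid is
+     # denser near t = 0 (pure noise) and sparser near t = 1 (clean data).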
+     @staticmethod
+     def _shift_respace_fn(t: torch.Tensor, shift: float = 3.0) -> torch.Tensor:
+         return t / (t + (1 - t) * shift)
+
+     def _build_solver_state(
+         self,
+         num_inference_steps: int,
+         timeshift: float,
+         device: Optional[Union[str, torch.device]] = None,
+     ) -> Tuple[torch.Tensor, torch.Tensor, List[List[float]]]:
+         last_step = self.last_step
+         if last_step is None:
+             last_step = 1.0 / float(num_inference_steps)
+
+         endpoints = torch.linspace(0.0, 1 - float(last_step), int(num_inference_steps), dtype=torch.float32)
+         endpoints = torch.cat([endpoints, torch.tensor([1.0], dtype=torch.float32)], dim=0)
+         timesteps = self._shift_respace_fn(endpoints, timeshift).to(device=device)
+         timedeltas = (timesteps[1:] - timesteps[:-1]).to(device=device)
+
+         solver_coeffs: List[List[float]] = [[] for _ in range(int(num_inference_steps))]
+         for i in range(int(num_inference_steps)):
+             order = min(self.order, i + 1)
+             pre_ts = timesteps[: i + 1]
+             coeffs = self._lagrange_coeffs(order, pre_ts, pre_ts[i], timesteps[i + 1])
+             solver_coeffs[i] = coeffs
+         return timesteps[:-1], timedeltas, solver_coeffs
+
+     def set_timesteps(
+         self,
+         num_inference_steps: Optional[int] = None,
+         device: Optional[Union[str, torch.device]] = None,
+         timeshift: Optional[float] = None,
+         guidance_scale: Optional[float] = None,
+         order: Optional[int] = None,
+         **kwargs: Any,
+     ) -> None:
+         if num_inference_steps is not None:
+             self.num_inference_steps = int(num_inference_steps)
+         if timeshift is not None:
+             self.timeshift = float(timeshift)
+         if guidance_scale is not None:
+             self.guidance_scale = float(guidance_scale)
+         if order is not None:
+             self.order = int(order)
+
+         timesteps, timedeltas, solver_coeffs = self._build_solver_state(
+             self.num_inference_steps,
+             self.timeshift,
+             device=device,
+         )
+         self.timesteps = timesteps
+         self._timedeltas = timedeltas
+         self._solver_coeffs = solver_coeffs
+         self._model_outputs = []
+         self._step_index = 0
+
+     def scale_model_input(self, sample: torch.Tensor, timestep: Optional[torch.Tensor] = None) -> torch.Tensor:
+         return sample
+
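+     # CFG combination: out = uncond + s * (cond - uncond), assuming the batch is
+     # stacked as [unconditional; conditional] halves.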
+     def classifier_free_guidance(self, model_output: torch.Tensor) -> torch.Tensor:
+         if model_output.shape[0] % 2 != 0:
+             raise ValueError("Classifier-free guidance expects concatenated unconditional/conditional batches.")
+         uncond, cond = model_output.chunk(2, dim=0)
+         return uncond + self.guidance_scale * (cond - uncond)
+
191
+ self,
192
+ model_output: torch.Tensor,
193
+ timestep: Union[torch.Tensor, float, int],
194
+ sample: torch.Tensor,
195
+ return_dict: bool = True,
196
+ **kwargs: Any,
197
+ ) -> Union[PixNerdSchedulerOutput, Tuple[torch.Tensor]]:
198
+ if self.timesteps is None or self._timedeltas is None or self._solver_coeffs is None:
199
+ raise RuntimeError("`set_timesteps` must be called before `step`.")
200
+ if self._step_index >= len(self._solver_coeffs):
201
+ raise RuntimeError("Scheduler step index exceeded configured timesteps.")
202
+
203
+ coeffs = self._solver_coeffs[self._step_index]
204
+ self._model_outputs.append(model_output)
205
+ order = len(coeffs)
206
+ pred = torch.zeros_like(model_output)
207
+ recent = self._model_outputs[-order:]
208
+ for coeff, output in zip(coeffs, recent):
209
+ pred = pred + coeff * output
210
+
211
+ prev_sample = sample + pred * self._timedeltas[self._step_index]
212
+ self._step_index += 1
213
+
214
+ if not return_dict:
215
+ return (prev_sample,)
216
+ return PixNerdSchedulerOutput(prev_sample=prev_sample)
217
+
218
+ def add_noise(
219
+ self,
220
+ original_samples: torch.Tensor,
221
+ noise: torch.Tensor,
222
+ timesteps: torch.Tensor,
223
+ ) -> torch.Tensor:
224
+ alpha = timesteps.view(-1, 1, 1, 1)
225
+ sigma = (1.0 - timesteps).view(-1, 1, 1, 1)
226
+ return alpha * original_samples + sigma * noise
227
+
228
+ __all__ = [
229
+ "PixNerdFlowMatchScheduler",
230
+ "PixNerdSchedulerOutput",
231
+ ]
PixNerd-XL-16-512/transformer/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_class_name": "PixNerdTransformer2DModel",
+   "_diffusers_version": "0.36.0",
+   "compile_denoiser": false,
+   "conditioner_spec": {
+     "class_path": "diffusers_modules.local.modeling_pixnerd_transformer_2d.LabelConditioner",
+     "init_args": {
+       "num_classes": 1000
+     }
+   },
+   "denoiser_spec": {
+     "class_path": "diffusers_modules.local.modeling_pixnerd_transformer_2d.PixNerDiT",
+     "init_args": {
+       "hidden_size": 1152,
+       "hidden_size_x": 64,
+       "in_channels": 3,
+       "nerf_mlpratio": 2,
+       "num_blocks": 30,
+       "num_classes": 1000,
+       "num_cond_blocks": 26,
+       "num_groups": 16,
+       "patch_size": 16
+     }
+   },
+   "diffusion_trainer_spec": null,
+   "ema_decay": 0.9999,
+   "use_ema": true,
+   "vae_spec": {
+     "class_path": "diffusers_modules.local.modeling_pixnerd_transformer_2d.PixelAE",
+     "init_args": {
+       "scale": 1.0
+     }
+   }
+ }
PixNerd-XL-16-512/transformer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6370c63da922adb7e12c4c5a865f4799e48e3706acea397e93badbbb69743c55
+ size 5604788640