dagloop5 committed
Commit be54e60 · verified · 1 Parent(s): 44241e7

Create app.py

Files changed (1):
  app.py +921 -0
app.py ADDED
@@ -0,0 +1,921 @@
+ # =============================================================================
+ # Installation and Setup
+ # =============================================================================
+ import os
+ import subprocess
+ import sys
+
+ os.environ["TORCH_COMPILE_DISABLE"] = "1"
+ os.environ["TORCHDYNAMO_DISABLE"] = "1"
+
+ subprocess.run(
+     [sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"],
+     check=False,
+ )
+
+ LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
+ LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")
+ LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"
+
+ if not os.path.exists(LTX_REPO_DIR):
+     print(f"Cloning {LTX_REPO_URL}...")
+     subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
+     subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)
+
+ print("Installing ltx-core and ltx-pipelines from cloned repo...")
+ subprocess.run(
+     [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
+      os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
+      "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
+     check=True,
+ )
+
+ sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
+ sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
+
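+ # The editable installs above run with --no-deps, so the two sys.path.insert
+ # calls make ltx_core and ltx_pipelines importable straight from the pinned
+ # checkout even if the pip metadata step fails on the Space.
+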
+ # =============================================================================
+ # Imports
+ # =============================================================================
+ import logging
+ import random
+ import tempfile
+ from pathlib import Path
+ import gc
+ import hashlib
+
+ import torch
+ torch._dynamo.config.suppress_errors = True
+ torch._dynamo.config.disable = True
+
+ import spaces
+ import gradio as gr
+ import numpy as np
+ from huggingface_hub import hf_hub_download, snapshot_download
+ from safetensors.torch import load_file, save_file
+
+ from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number
+ from ltx_core.model.audio_vae import decode_audio as vae_decode_audio
+ from ltx_core.model.video_vae import decode_video as vae_decode_video
+ from ltx_core.model.upsampler import upsample_video
+ from ltx_core.quantization import QuantizationPolicy
+ from ltx_core.loader import LoraPathStrengthAndSDOps, LTXV_LORA_COMFY_RENAMING_MAP
+ from ltx_core.components.guiders import MultiModalGuider, MultiModalGuiderParams
+ from ltx_core.components.noisers import GaussianNoiser
+ from ltx_core.components.diffusion_steps import Res2sDiffusionStep
+ from ltx_core.components.schedulers import LTX2Scheduler
+ from ltx_core.types import Audio, LatentState, VideoPixelShape, AudioLatentShape
+ from ltx_core.tools import VideoLatentShape
+
+ from ltx_pipelines.ti2vid_two_stages_hq import TI2VidTwoStagesHQPipeline
+ from ltx_pipelines.utils.args import ImageConditioningInput
+ from ltx_pipelines.utils.constants import LTX_2_3_HQ_PARAMS, STAGE_2_DISTILLED_SIGMA_VALUES
+ from ltx_pipelines.utils.media_io import encode_video
+ from ltx_pipelines.utils.helpers import (
+     assert_resolution,
+     cleanup_memory,
+     combined_image_conditionings,
+     encode_prompts,
+     multi_modal_guider_denoising_func,
+     simple_denoising_func,
+     denoise_audio_video,
+ )
+
+ from ltx_pipelines.utils import res2s_audio_video_denoising_loop
+
+ # Patch xformers
+ try:
+     from ltx_core.model.transformer import attention as _attn_mod
+     from xformers.ops import memory_efficient_attention as _mea
+     _attn_mod.memory_efficient_attention = _mea
+     print("[ATTN] xformers patch applied")
+ except Exception as e:
+     print(f"[ATTN] xformers patch failed: {e}")
+
+ logging.getLogger().setLevel(logging.INFO)
+
+ MAX_SEED = np.iinfo(np.int32).max
+ DEFAULT_PROMPT = (
+     "A majestic eagle soaring over mountain peaks at sunset, "
+     "wings spread wide against the orange sky, feathers catching the light, "
+     "wind currents visible in the motion blur, cinematic slow motion, 4K quality"
+ )
+ DEFAULT_NEGATIVE_PROMPT = (
+     "worst quality, inconsistent motion, blurry, jittery, distorted, "
+     "deformed, artifacts, text, watermark, logo, frame, border, "
+     "low resolution, pixelated, unnatural, fake, CGI, cartoon"
+ )
+ DEFAULT_FRAME_RATE = 24.0
+ MIN_DIM, MAX_DIM, STEP = 256, 1280, 64
+ MIN_FRAMES, MAX_FRAMES = 9, 257
+
+ # Resolution presets with high/low tiers
+ RESOLUTIONS = {
+     "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
+     "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
+ }
+
+ LTX_MODEL_REPO = "Lightricks/LTX-2.3"
+ GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"
+
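+ # Frame counts must be of the form 8k + 1 (presumably matching the video VAE's
+ # temporal stride), hence MIN_FRAMES = 9 (k=1) and MAX_FRAMES = 257 (k=32);
+ # calculate_frames() below snaps a requested duration onto this grid.
+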
+ # =============================================================================
+ # Custom HQ Pipeline with LoRA Cache Support
+ # =============================================================================
+
+ class HQPipelineWithCachedLoRA:
+     """
+     Custom HQ pipeline that:
+     1. Creates ONE ModelLedger WITHOUT LoRAs
+     2. Handles ALL LoRAs via cached state (distilled + 12 custom)
+     3. Supports CFG/negative prompts and guidance parameters
+     4. Reuses a single transformer for both stages
+     """
+
+     def __init__(
+         self,
+         checkpoint_path: str,
+         spatial_upsampler_path: str,
+         gemma_root: str,
+         quantization: QuantizationPolicy | None = None,
+     ):
+         from ltx_pipelines.utils import ModelLedger
+         from ltx_pipelines.utils.types import PipelineComponents
+
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.dtype = torch.bfloat16
+
+         # Create ONE ModelLedger for everything
+         print("Creating ModelLedger (no LoRAs)...")
+         self.model_ledger = ModelLedger(
+             dtype=self.dtype,
+             device=self.device,
+             checkpoint_path=checkpoint_path,
+             gemma_root_path=gemma_root,
+             spatial_upsampler_path=spatial_upsampler_path,
+             loras=(),  # NO LoRAs
+             quantization=quantization,
+         )
+
+         # Pipeline components
+         self.pipeline_components = PipelineComponents(
+             dtype=self.dtype,
+             device=self.device,
+         )
+
+         # Storage for cached LoRA state
+         self._cached_state = None
+
+     def apply_cached_lora_state(self, state_dict):
+         """Store a pre-fused LoRA state dict; it is applied on the next __call__."""
+         self._cached_state = state_dict
+
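+     # Usage sketch: prepare_lora_cache() below builds a fused state dict once,
+     # then calls pipeline.apply_cached_lora_state(state); the stored state is
+     # loaded into the transformer lazily at the start of the next __call__.
+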
+     @torch.inference_mode()
+     def __call__(  # noqa: PLR0913
+         self,
+         prompt: str,
+         negative_prompt: str,
+         seed: int,
+         height: int,
+         width: int,
+         num_frames: int,
+         frame_rate: float,
+         num_inference_steps: int,
+         video_guider_params: MultiModalGuiderParams,
+         audio_guider_params: MultiModalGuiderParams,
+         images: list,
+         tiling_config: TilingConfig | None = None,
+         enhance_prompt: bool = False,
+     ):
+         # All helpers used below are already imported at module level.
+         assert_resolution(height=height, width=width, is_two_stage=True)
+
+         device = self.device
+         dtype = self.dtype
+         generator = torch.Generator(device=device).manual_seed(seed)
+         noiser = GaussianNoiser(generator=generator)
+
+         # Apply cached LoRA state if available
+         if self._cached_state is not None:
+             print("[LoRA] Applying cached state to transformer...")
+             transformer = self.model_ledger.transformer()
+             with torch.no_grad():
+                 transformer.load_state_dict(self._cached_state, strict=False)
+
+         ctx_p, ctx_n = encode_prompts(
+             [prompt, negative_prompt],
+             self.model_ledger,
+             enhance_first_prompt=enhance_prompt,
+             enhance_prompt_image=images[0][0] if len(images) > 0 else None,
+             enhance_prompt_seed=seed,
+         )
+
+         v_context_p, a_context_p = ctx_p.video_encoding, ctx_p.audio_encoding
+         v_context_n, a_context_n = ctx_n.video_encoding, ctx_n.audio_encoding
+
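+         # Positive and negative encodings feed classifier-free guidance in
+         # stage 1: roughly out = neg + cfg_scale * (pos - neg) per step, with
+         # the exact combination handled inside MultiModalGuider.
+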
+         # ===================== STAGE 1 =====================
+         stage_1_output_shape = VideoPixelShape(
+             batch=1, frames=num_frames,
+             width=width // 2, height=height // 2, fps=frame_rate,
+         )
+
+         video_encoder = self.model_ledger.video_encoder()
+         stage_1_conditionings = combined_image_conditionings(
+             images=images,
+             height=stage_1_output_shape.height,
+             width=stage_1_output_shape.width,
+             video_encoder=video_encoder,
+             dtype=dtype,
+             device=device,
+         )
+         torch.cuda.synchronize()
+         del video_encoder
+         cleanup_memory()
+
+         transformer = self.model_ledger.transformer()
+
+         empty_latent = torch.empty(VideoLatentShape.from_pixel_shape(stage_1_output_shape).to_torch_shape())
+         stepper = Res2sDiffusionStep()
+         sigmas = (
+             LTX2Scheduler()
+             .execute(latent=empty_latent, steps=num_inference_steps)
+             .to(dtype=torch.float32, device=device)
+         )
+
+         def first_stage_denoising_loop(sigmas, video_state, audio_state, stepper):
+             return res2s_audio_video_denoising_loop(
+                 sigmas=sigmas,
+                 video_state=video_state,
+                 audio_state=audio_state,
+                 stepper=stepper,
+                 denoise_fn=multi_modal_guider_denoising_func(
+                     video_guider=MultiModalGuider(params=video_guider_params, negative_context=v_context_n),
+                     audio_guider=MultiModalGuider(params=audio_guider_params, negative_context=a_context_n),
+                     v_context=v_context_p,
+                     a_context=a_context_p,
+                     transformer=transformer,
+                 ),
+             )
+
+         video_state, audio_state = denoise_audio_video(
+             output_shape=stage_1_output_shape,
+             conditionings=stage_1_conditionings,
+             noiser=noiser,
+             sigmas=sigmas,
+             stepper=stepper,
+             denoising_loop_fn=first_stage_denoising_loop,
+             components=self.pipeline_components,
+             dtype=dtype,
+             device=device,
+         )
+
+         torch.cuda.synchronize()
+         del transformer
+         cleanup_memory()
+
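+         # Two-stage design: stage 1 denoises at half resolution with full CFG
+         # guidance; stage 2 then refines a 2x spatially upsampled latent with a
+         # short distilled schedule, trading a cheap guided pass for a fast HQ pass.
+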
+         # ===================== UPSCALING =====================
+         video_encoder = self.model_ledger.video_encoder()
+         upscaled_video_latent = upsample_video(
+             latent=video_state.latent[:1],
+             video_encoder=video_encoder,
+             upsampler=self.model_ledger.spatial_upsampler(),
+         )
+
+         stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
+         stage_2_conditionings = combined_image_conditionings(
+             images=images,
+             height=stage_2_output_shape.height,
+             width=stage_2_output_shape.width,
+             video_encoder=video_encoder,
+             dtype=dtype,
+             device=device,
+         )
+         torch.cuda.synchronize()
+         del video_encoder
+         cleanup_memory()
+
+         # ===================== STAGE 2 =====================
+         transformer = self.model_ledger.transformer()
+
+         distilled_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=device)
+
+         def second_stage_denoising_loop(sigmas, video_state, audio_state, stepper):
+             return res2s_audio_video_denoising_loop(
+                 sigmas=sigmas,
+                 video_state=video_state,
+                 audio_state=audio_state,
+                 stepper=stepper,
+                 denoise_fn=simple_denoising_func(
+                     video_context=v_context_p,
+                     audio_context=a_context_p,
+                     transformer=transformer,
+                 ),
+             )
+
+         video_state, audio_state = denoise_audio_video(
+             output_shape=stage_2_output_shape,
+             conditionings=stage_2_conditionings,
+             noiser=noiser,
+             sigmas=distilled_sigmas,
+             stepper=stepper,
+             denoising_loop_fn=second_stage_denoising_loop,
+             components=self.pipeline_components,
+             dtype=dtype,
+             device=device,
+             noise_scale=distilled_sigmas[0],
+             initial_video_latent=upscaled_video_latent,
+             initial_audio_latent=audio_state.latent,
+         )
+
+         torch.cuda.synchronize()
+         del transformer
+         cleanup_memory()
+
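+         # Stage 2 skips CFG entirely (simple_denoising_func only sees the
+         # positive contexts) and runs len(STAGE_2_DISTILLED_SIGMA_VALUES) steps,
+         # starting from the upsampled latent re-noised to distilled_sigmas[0].
+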
+         # ===================== DECODE =====================
+         decoded_video = vae_decode_video(
+             video_state.latent, self.model_ledger.video_decoder(), tiling_config, generator
+         )
+         decoded_audio = vae_decode_audio(
+             audio_state.latent, self.model_ledger.audio_decoder(), self.model_ledger.vocoder()
+         )
+
+         return decoded_video, decoded_audio
+
+
+ # =============================================================================
+ # Model Download
+ # =============================================================================
+
+ print("=" * 80)
+ print("Downloading LTX-2.3 HQ models...")
+ print("=" * 80)
+
+ checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-dev.safetensors")
+ spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.1.safetensors")
+ distilled_lora_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled-lora-384.safetensors")
+ gemma_root = snapshot_download(repo_id=GEMMA_REPO)
+
+ print(f"Dev checkpoint: {checkpoint_path}")
+ print(f"Spatial upsampler: {spatial_upsampler_path}")
+ print(f"Distilled LoRA: {distilled_lora_path}")
+ print(f"Gemma root: {gemma_root}")
+
+ # =============================================================================
+ # Download Custom LoRAs
+ # =============================================================================
+
+ LORA_REPO = "dagloop5/LoRA"
+
+ print("=" * 80)
+ print("Downloading custom LoRA adapters...")
+ print("=" * 80)
+
+ pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
+ general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_reasoning_I2V_V3.safetensors")
+ motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
+ dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")
+ mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")
+ dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")
+ fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")
+ liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")
+ demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
+ voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
+ realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
+ transition_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2_takerpov_lora_v1.2.safetensors")
+
+ print("All 12 custom LoRAs + distilled LoRA downloaded")
+ print("=" * 80)
+
+ # =============================================================================
+ # Pipeline Initialization
+ # =============================================================================
+
+ print("Initializing HQ Pipeline...")
+
+ pipeline = HQPipelineWithCachedLoRA(
+     checkpoint_path=checkpoint_path,
+     spatial_upsampler_path=spatial_upsampler_path,
+     gemma_root=gemma_root,
+     quantization=QuantizationPolicy.fp8_cast(),
+ )
+
+ print("Pipeline initialized!")
+ print("=" * 80)
+
+ # =============================================================================
+ # ZeroGPU Tensor Preloading - Single Transformer
+ # =============================================================================
+
+ print("Preloading models for ZeroGPU tensor packing...")
+
+ # Load shared components
+ _video_encoder = pipeline.model_ledger.video_encoder()
+ _video_decoder = pipeline.model_ledger.video_decoder()
+ _audio_encoder = pipeline.model_ledger.audio_encoder()
+ _audio_decoder = pipeline.model_ledger.audio_decoder()
+ _vocoder = pipeline.model_ledger.vocoder()
+ _spatial_upsampler = pipeline.model_ledger.spatial_upsampler()
+ _text_encoder = pipeline.model_ledger.text_encoder()
+ _embeddings_processor = pipeline.model_ledger.gemma_embeddings_processor()
+
+ # Load the SINGLE transformer
+ _transformer = pipeline.model_ledger.transformer()
+
+ # Replace ledger methods with lambdas returning cached instances
+ pipeline.model_ledger.video_encoder = lambda: _video_encoder
+ pipeline.model_ledger.video_decoder = lambda: _video_decoder
+ pipeline.model_ledger.audio_encoder = lambda: _audio_encoder
+ pipeline.model_ledger.audio_decoder = lambda: _audio_decoder
+ pipeline.model_ledger.vocoder = lambda: _vocoder
+ pipeline.model_ledger.spatial_upsampler = lambda: _spatial_upsampler
+ pipeline.model_ledger.text_encoder = lambda: _text_encoder
+ pipeline.model_ledger.gemma_embeddings_processor = lambda: _embeddings_processor
+ pipeline.model_ledger.transformer = lambda: _transformer
+
+ print("All models preloaded for ZeroGPU tensor packing!")
+ print("=" * 80)
+ print("Pipeline ready!")
+ print("=" * 80)
+
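+ # Replacing the ledger's factory methods with closures over preloaded instances
+ # means every later call returns the same resident module instead of re-reading
+ # weights; the `del transformer` statements inside __call__ then only drop a
+ # local reference, not the cached model.
+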
+ # =============================================================================
+ # LoRA Cache Functions
+ # =============================================================================
+
+ LORA_CACHE_DIR = Path("lora_cache")
+ LORA_CACHE_DIR.mkdir(exist_ok=True)
+
+ def prepare_lora_cache(
+     distilled_strength: float,
+     pose_strength: float, general_strength: float, motion_strength: float,
+     dreamlay_strength: float, mself_strength: float, dramatic_strength: float,
+     fluid_strength: float, liquid_strength: float, demopose_strength: float,
+     voice_strength: float, realism_strength: float, transition_strength: float,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     """Build (or load from disk) a fused LoRA state dict for the single transformer."""
+     global pipeline
+
+     progress(0.05, desc="Preparing LoRA cache...")
+
+     key_str = ":".join(
+         str(v) for v in (
+             checkpoint_path,
+             distilled_strength, pose_strength, general_strength, motion_strength,
+             dreamlay_strength, mself_strength, dramatic_strength, fluid_strength,
+             liquid_strength, demopose_strength, voice_strength, realism_strength,
+             transition_strength,
+         )
+     )
+     key = hashlib.sha256(key_str.encode()).hexdigest()
+
+     cache_path = LORA_CACHE_DIR / f"{key}.safetensors"
+
+     if cache_path.exists():
+         progress(0.20, desc="Loading cached LoRA state...")
+         state = load_file(str(cache_path))
+         pipeline.apply_cached_lora_state(state)
+         return f"Loaded cached LoRA state: {cache_path.name}"
+
+     entries = [
+         (distilled_lora_path, distilled_strength),
+         (pose_lora_path, pose_strength),
+         (general_lora_path, general_strength),
+         (motion_lora_path, motion_strength),
+         (dreamlay_lora_path, dreamlay_strength),
+         (mself_lora_path, mself_strength),
+         (dramatic_lora_path, dramatic_strength),
+         (fluid_lora_path, fluid_strength),
+         (liquid_lora_path, liquid_strength),
+         (demopose_lora_path, demopose_strength),
+         (voice_lora_path, voice_strength),
+         (realism_lora_path, realism_strength),
+         (transition_lora_path, transition_strength),
+     ]
+
+     loras = [
+         LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
+         for path, strength in entries
+         if path is not None and float(strength) != 0.0
+     ]
+
+     progress(0.35, desc="Building fused state (CPU)...")
+     tmp_ledger = pipeline.model_ledger.__class__(
+         dtype=torch.bfloat16,
+         device=torch.device("cpu"),
+         checkpoint_path=str(checkpoint_path),
+         spatial_upsampler_path=str(spatial_upsampler_path),
+         gemma_root_path=str(gemma_root),
+         loras=tuple(loras),
+         quantization=None,
+     )
+     transformer = tmp_ledger.transformer()
+     state = {k: v.detach().cpu().contiguous() for k, v in transformer.state_dict().items()}
+     save_file(state, str(cache_path))
+
+     del transformer, tmp_ledger
+     gc.collect()
+
+     progress(0.90, desc="Applying LoRA state to pipeline...")
+     pipeline.apply_cached_lora_state(state)
+
+     progress(1.0, desc="Done!")
+     return f"Built and cached LoRA state: {cache_path.name}"
+
+
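+ # Cache key sketch: key_str is "<checkpoint_path>:0.8:0.0:...:0.0" for a given
+ # slider combination, so its sha256 digest names one .safetensors file per
+ # unique (checkpoint, strengths) tuple; re-running with the same sliders is a
+ # pure load with no CPU fusion pass.
+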
+ # =============================================================================
+ # Helper Functions
+ # =============================================================================
+
+ def log_memory(tag: str):
+     if torch.cuda.is_available():
+         allocated = torch.cuda.memory_allocated() / 1024**3
+         peak = torch.cuda.max_memory_allocated() / 1024**3
+         free, total = torch.cuda.mem_get_info()
+         print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
+
+
+ def calculate_frames(duration: float, frame_rate: float = DEFAULT_FRAME_RATE) -> int:
+     """Snap duration * fps onto the nearest valid 8k + 1 frame count."""
+     ideal_frames = int(duration * frame_rate)
+     ideal_frames = max(ideal_frames, MIN_FRAMES)
+     k = round((ideal_frames - 1) / 8)
+     frames = k * 8 + 1
+     return min(frames, MAX_FRAMES)
+
+
+ def validate_resolution(height: int, width: int) -> tuple[int, int]:
+     """Round each dimension to a multiple of STEP and clamp to [MIN_DIM, MAX_DIM]."""
+     height = round(height / STEP) * STEP
+     width = round(width / STEP) * STEP
+     height = max(MIN_DIM, min(height, MAX_DIM))
+     width = max(MIN_DIM, min(width, MAX_DIM))
+     return height, width
+
+
+ def detect_aspect_ratio(image) -> str:
+     if image is None:
+         return "16:9"
+     if hasattr(image, "size"):
+         w, h = image.size
+     elif hasattr(image, "shape"):
+         h, w = image.shape[:2]
+     else:
+         return "16:9"
+     ratio = w / h
+     candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
+     return min(candidates, key=lambda k: abs(ratio - candidates[k]))
+
+
+ def on_image_upload(first_image, last_image, high_res):
+     ref_image = first_image if first_image is not None else last_image
+     aspect = detect_aspect_ratio(ref_image)
+     tier = "high" if high_res else "low"
+     w, h = RESOLUTIONS[tier][aspect]
+     return gr.update(value=w), gr.update(value=h)
+
+
+ # Same behavior as on_image_upload; kept as a separate handler for the checkbox.
+ def on_highres_toggle(first_image, last_image, high_res):
+     return on_image_upload(first_image, last_image, high_res)
+
+
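+ # Worked example: calculate_frames(2.0) -> ideal = int(2.0 * 24) = 48,
+ # k = round(47 / 8) = 6, frames = 6 * 8 + 1 = 49, i.e. ~2.04 s at 24 fps.
+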
+ def get_gpu_duration(
+     first_image,
+     last_image,
+     prompt: str,
+     negative_prompt: str,
+     duration: float,
+     gpu_duration: float,
+     enhance_prompt: bool = False,
+     seed: int = 42,
+     randomize_seed: bool = True,
+     height: int = 1024,
+     width: int = 1536,
+     video_cfg_scale: float = 1.0,
+     video_stg_scale: float = 0.0,
+     video_rescale_scale: float = 0.45,
+     video_a2v_scale: float = 3.0,
+     audio_cfg_scale: float = 1.0,
+     audio_stg_scale: float = 0.0,
+     audio_rescale_scale: float = 1.0,
+     audio_v2a_scale: float = 3.0,
+     distilled_strength: float = 0.0,
+     pose_strength: float = 0.0,
+     general_strength: float = 0.0,
+     motion_strength: float = 0.0,
+     dreamlay_strength: float = 0.0,
+     mself_strength: float = 0.0,
+     dramatic_strength: float = 0.0,
+     fluid_strength: float = 0.0,
+     liquid_strength: float = 0.0,
+     demopose_strength: float = 0.0,
+     voice_strength: float = 0.0,
+     realism_strength: float = 0.0,
+     transition_strength: float = 0.0,
+     progress=None,
+ ) -> int:
+     # Mirrors generate_video's signature; only the user-set slider matters.
+     return int(gpu_duration)
+
+
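+ # @spaces.GPU accepts a callable for `duration`: ZeroGPU invokes it with the
+ # same arguments as the wrapped function before attaching a GPU, so the
+ # gpu_duration slider directly sets the time budget for each generation.
+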
+ @spaces.GPU(duration=get_gpu_duration)
+ @torch.inference_mode()
+ def generate_video(
+     first_image,
+     last_image,
+     prompt: str,
+     negative_prompt: str,
+     duration: float,
+     gpu_duration: float,
+     enhance_prompt: bool = False,
+     seed: int = 42,
+     randomize_seed: bool = True,
+     height: int = 1024,
+     width: int = 1536,
+     video_cfg_scale: float = 1.0,
+     video_stg_scale: float = 0.0,
+     video_rescale_scale: float = 0.45,
+     video_a2v_scale: float = 3.0,
+     audio_cfg_scale: float = 1.0,
+     audio_stg_scale: float = 0.0,
+     audio_rescale_scale: float = 1.0,
+     audio_v2a_scale: float = 3.0,
+     distilled_strength: float = 0.0,
+     pose_strength: float = 0.0,
+     general_strength: float = 0.0,
+     motion_strength: float = 0.0,
+     dreamlay_strength: float = 0.0,
+     mself_strength: float = 0.0,
+     dramatic_strength: float = 0.0,
+     fluid_strength: float = 0.0,
+     liquid_strength: float = 0.0,
+     demopose_strength: float = 0.0,
+     voice_strength: float = 0.0,
+     realism_strength: float = 0.0,
+     transition_strength: float = 0.0,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     # Resolve the seed before the try block so the except path can return it.
+     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
+     try:
+         torch.cuda.reset_peak_memory_stats()
+         log_memory("start")
+
+         print(f"Using seed: {current_seed}")
+
+         height, width = validate_resolution(int(height), int(width))
+         print(f"Resolution: {width}x{height}")
+
+         num_frames = calculate_frames(duration, DEFAULT_FRAME_RATE)
+         print(f"Frames: {num_frames} ({duration}s @ {DEFAULT_FRAME_RATE}fps)")
+
+         images = []
+         output_dir = Path("outputs")
+         output_dir.mkdir(exist_ok=True)
+
+         if first_image is not None:
+             temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
+             if hasattr(first_image, "save"):
+                 first_image.save(temp_first_path)
+             else:
+                 import shutil
+                 shutil.copy(first_image, temp_first_path)
+             images.append((str(temp_first_path), 1.0))
+
+         if last_image is not None:
+             temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
+             if hasattr(last_image, "save"):
+                 last_image.save(temp_last_path)
+             else:
+                 import shutil
+                 shutil.copy(last_image, temp_last_path)
+             images.append((str(temp_last_path), 1.0))
+
+         tiling_config = TilingConfig.default()
+         video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
+
+         video_guider_params = MultiModalGuiderParams(
+             cfg_scale=video_cfg_scale,
+             stg_scale=video_stg_scale,
+             rescale_scale=video_rescale_scale,
+             modality_scale=video_a2v_scale,
+             skip_step=0,
+             stg_blocks=[],
+         )
+
+         audio_guider_params = MultiModalGuiderParams(
+             cfg_scale=audio_cfg_scale,
+             stg_scale=audio_stg_scale,
+             rescale_scale=audio_rescale_scale,
+             modality_scale=audio_v2a_scale,
+             skip_step=0,
+             stg_blocks=[],
+         )
+
+         log_memory("before pipeline call")
+
+         video, audio = pipeline(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             seed=current_seed,
+             height=height,
+             width=width,
+             num_frames=num_frames,
+             frame_rate=DEFAULT_FRAME_RATE,
+             num_inference_steps=LTX_2_3_HQ_PARAMS.num_inference_steps,
+             video_guider_params=video_guider_params,
+             audio_guider_params=audio_guider_params,
+             images=images,
+             tiling_config=tiling_config,
+             enhance_prompt=enhance_prompt,
+         )
+
+         log_memory("after pipeline call")
+
+         output_path = tempfile.mktemp(suffix=".mp4")
+         encode_video(
+             video=video,
+             fps=DEFAULT_FRAME_RATE,
+             audio=audio,
+             output_path=output_path,
+             video_chunks_number=video_chunks_number,
+         )
+
+         log_memory("after encode_video")
+         return str(output_path), current_seed
+
+     except Exception as e:
+         import traceback
+         log_memory("on error")
+         print(f"Error: {str(e)}\n{traceback.format_exc()}")
+         return None, current_seed
+
+
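+ # Note: the per-LoRA strength arguments accepted by generate_video are unused
+ # in its body; the active LoRA mix is whatever prepare_lora_cache last applied.
+ # They stay in the signature so the Gradio click can pass identical inputs to
+ # both get_gpu_duration and generate_video.
+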
+ # =============================================================================
+ # Gradio UI
+ # =============================================================================
+
+ css = """
+ .fillable {max-width: 1200px !important}
+ .progress-text {color: white}
+ """
+
+ with gr.Blocks(title="LTX-2.3 Two-Stage HQ with LoRA Cache", theme=gr.themes.Citrus(), css=css) as demo:
+     gr.Markdown("# LTX-2.3 Two-Stage HQ Video Generation with LoRA Cache")
+     gr.Markdown(
+         "High-quality text/image-to-video with cached LoRA state + CFG guidance. "
+         "[[Model]](https://huggingface.co/Lightricks/LTX-2.3)"
+     )
+
+     with gr.Row():
+         # LEFT SIDE: Input Controls
+         with gr.Column():
+             with gr.Row():
+                 first_image = gr.Image(label="First Frame (Optional)", type="pil")
+                 last_image = gr.Image(label="Last Frame (Optional)", type="pil")
+
+             prompt = gr.Textbox(
+                 label="Prompt",
+                 value=DEFAULT_PROMPT,
+                 lines=3,
+             )
+
+             negative_prompt = gr.Textbox(
+                 label="Negative Prompt",
+                 value=DEFAULT_NEGATIVE_PROMPT,
+                 lines=2,
+             )
+
+             duration = gr.Slider(
+                 label="Duration (seconds)",
+                 minimum=0.5, maximum=8.0, value=2.0, step=0.1,
+             )
+
+             with gr.Row():
+                 seed = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=MAX_SEED)
+                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+
+             with gr.Row():
+                 enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
+                 high_res = gr.Checkbox(label="High Resolution", value=True)
+
+             with gr.Row():
+                 width = gr.Number(label="Width", value=1536, precision=0)
+                 height = gr.Number(label="Height", value=1024, precision=0)
+
+             generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
+
+             with gr.Accordion("Advanced Settings", open=False):
+                 gr.Markdown("### Video Guidance Parameters")
+
+                 with gr.Row():
+                     video_cfg_scale = gr.Slider(
+                         label="Video CFG Scale", minimum=1.0, maximum=10.0,
+                         value=LTX_2_3_HQ_PARAMS.video_guider_params.cfg_scale, step=0.1
+                     )
+                     video_stg_scale = gr.Slider(
+                         label="Video STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
+                     )
+
+                 with gr.Row():
+                     video_rescale_scale = gr.Slider(
+                         label="Video Rescale", minimum=0.0, maximum=2.0, value=0.45, step=0.1
+                     )
+                     video_a2v_scale = gr.Slider(
+                         label="A2V Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
+                     )
+
+                 gr.Markdown("### Audio Guidance Parameters")
+
+                 with gr.Row():
+                     audio_cfg_scale = gr.Slider(
+                         label="Audio CFG Scale", minimum=1.0, maximum=15.0,
+                         value=LTX_2_3_HQ_PARAMS.audio_guider_params.cfg_scale, step=0.1
+                     )
+                     audio_stg_scale = gr.Slider(
+                         label="Audio STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
+                     )
+
+                 with gr.Row():
+                     audio_rescale_scale = gr.Slider(
+                         label="Audio Rescale", minimum=0.0, maximum=2.0, value=1.0, step=0.1
+                     )
+                     audio_v2a_scale = gr.Slider(
+                         label="V2A Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
+                     )
+
+         # RIGHT SIDE: Output and LoRA
+         with gr.Column():
+             output_video = gr.Video(label="Generated Video", autoplay=True)
+
+             gpu_duration = gr.Slider(
+                 label="ZeroGPU duration (seconds)",
+                 minimum=30.0, maximum=240.0, value=90.0, step=1.0,
+                 info="Increase for longer videos, higher resolution, or LoRA usage",
+             )
+
+             gr.Markdown("### LoRA Adapter Strengths")
+             gr.Markdown("Set a strength to 0 to disable that adapter, then click 'Prepare / Load LoRA Cache'")
+
+             with gr.Row():
+                 distilled_strength = gr.Slider(label="Distilled LoRA", minimum=0.0, maximum=1.5, value=0.0, step=0.01)
+                 pose_strength = gr.Slider(label="Anthro Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 general_strength = gr.Slider(label="Reasoning Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 motion_strength = gr.Slider(label="Anthro Posing", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 dreamlay_strength = gr.Slider(label="Dreamlay", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 mself_strength = gr.Slider(label="Mself", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 dramatic_strength = gr.Slider(label="Dramatic", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 fluid_strength = gr.Slider(label="Fluid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 liquid_strength = gr.Slider(label="Liquid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 demopose_strength = gr.Slider(label="Audio Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 voice_strength = gr.Slider(label="Voice Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 realism_strength = gr.Slider(label="Anthro Realism", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 transition_strength = gr.Slider(label="POV", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 gr.Markdown("")  # Spacer for alignment
+
+             prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
+             lora_status = gr.Textbox(
+                 label="LoRA Cache Status",
+                 value="No LoRA state prepared yet.",
+                 interactive=False,
+             )
+
+     # Event handlers
+     first_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
+     last_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
+     high_res.change(fn=on_highres_toggle, inputs=[first_image, last_image, high_res], outputs=[width, height])
+
+     prepare_lora_btn.click(
+         fn=prepare_lora_cache,
+         inputs=[distilled_strength, pose_strength, general_strength, motion_strength, dreamlay_strength,
+                 mself_strength, dramatic_strength, fluid_strength, liquid_strength,
+                 demopose_strength, voice_strength, realism_strength, transition_strength],
+         outputs=[lora_status],
+     )
+
+     # Inputs are passed positionally, so their order must match generate_video's
+     # signature (enhance_prompt comes before seed/randomize_seed/height/width).
+     generate_btn.click(
+         fn=generate_video,
+         inputs=[
+             first_image, last_image, prompt, negative_prompt, duration, gpu_duration,
+             enhance_prompt, seed, randomize_seed, height, width,
+             video_cfg_scale, video_stg_scale, video_rescale_scale, video_a2v_scale,
+             audio_cfg_scale, audio_stg_scale, audio_rescale_scale, audio_v2a_scale,
+             distilled_strength, pose_strength, general_strength, motion_strength,
+             dreamlay_strength, mself_strength, dramatic_strength, fluid_strength,
+             liquid_strength, demopose_strength, voice_strength, realism_strength,
+             transition_strength,
+         ],
+         outputs=[output_video, seed],
+     )
+
+
+ if __name__ == "__main__":
+     # theme and css are gr.Blocks arguments (set above); launch() does not accept them.
+     demo.queue().launch(mcp_server=True)