dagloop5 committed
Commit fc168fb · verified · 1 parent: ffd3aee

Create app.py

Files changed (1): app.py +982 -0
app.py ADDED
@@ -0,0 +1,982 @@
import os
import subprocess
import sys

# Disable torch.compile / dynamo before any torch import
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

# Install xformers for memory-efficient attention
subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)

# Clone LTX-2 repo and install packages
LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")

LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
    subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)

print("Installing ltx-core and ltx-pipelines from cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
     os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)

sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
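
# NOTE (hedged): the editable installs above register the packages with pip, while the
# sys.path inserts make the freshly cloned src/ trees importable in this same interpreter
# even if the editable-install path hooks are not re-resolved without a restart.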

import logging
import random
import tempfile
from pathlib import Path
import gc
import hashlib

import torch
torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download, snapshot_download
from safetensors.torch import load_file, save_file
from safetensors import safe_open
import json
import requests

from ltx_core.components.diffusion_steps import EulerDiffusionStep
from ltx_core.components.guiders import MultiModalGuider, MultiModalGuiderParams
from ltx_core.components.noisers import GaussianNoiser
# decode_audio is called as vae_decode_audio at the end of the pipeline below but was
# never imported; assumed to be exported alongside encode_audio in audio_vae.
from ltx_core.model.audio_vae import encode_audio as vae_encode_audio, decode_audio as vae_decode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
from ltx_core.quantization import QuantizationPolicy
from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
from ltx_pipelines.distilled import DistilledPipeline
from ltx_pipelines.utils import euler_denoising_loop
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
from ltx_pipelines.utils.helpers import (
    cleanup_memory,
    combined_image_conditionings,
    denoise_video_only,
    encode_prompts,
    simple_denoising_func,
    multi_modal_guider_denoising_func,
)
from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP

# Force-patch xformers attention into the LTX attention module.
from ltx_core.model.transformer import attention as _attn_mod
print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
try:
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
except Exception as e:
    print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")

logging.getLogger().setLevel(logging.INFO)

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "An astronaut hatches from a fragile egg on the surface of the Moon, "
    "the shell cracking and peeling apart in gentle low-gravity motion. "
    "Fine lunar dust lifts and drifts outward with each movement, floating "
    "in slow arcs before settling back onto the ground."
)
DEFAULT_NEGATIVE_PROMPT = (
    "worst quality, inconsistent motion, blurry, jittery, distorted, "
    "deformed, artifacts, text, watermark, logo, frame, border, "
    "low resolution, pixelated, unnatural, fake, CGI, cartoon"
)
DEFAULT_FRAME_RATE = 24.0

# Resolution presets: (width, height)
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
}

class LTX23DistilledA2VPipeline:
    """Standalone pipeline with optional audio conditioning (no parent class)."""

    def __init__(
        self,
        distilled_checkpoint_path: str,
        spatial_upsampler_path: str,
        gemma_root: str,
        loras: tuple,
        quantization: QuantizationPolicy | None = None,
    ):
        from ltx_pipelines.utils import ModelLedger, denoise_audio_video
        from ltx_pipelines.utils.types import PipelineComponents

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.dtype = torch.bfloat16

        self.model_ledger = ModelLedger(
            dtype=self.dtype,
            device=self.device,
            checkpoint_path=distilled_checkpoint_path,
            gemma_root_path=gemma_root,
            spatial_upsampler_path=spatial_upsampler_path,
            loras=loras,
            quantization=quantization,
        )

        self.pipeline_components = PipelineComponents(
            dtype=self.dtype,
            device=self.device,
        )

    def __call__(
        self,
        prompt: str,
        negative_prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        video_guider_params: MultiModalGuiderParams,
        audio_guider_params: MultiModalGuiderParams,
        images: list[ImageConditioningInput],
        audio_path: str | None = None,
        tiling_config: TilingConfig | None = None,
        enhance_prompt: bool = False,
    ):
        print(prompt)

        generator = torch.Generator(device=self.device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)
        stepper = EulerDiffusionStep()
        dtype = torch.bfloat16

        ctx_p, ctx_n = encode_prompts(
            [prompt, negative_prompt],
            self.model_ledger,
            enhance_first_prompt=enhance_prompt,
            enhance_prompt_image=images[0].path if len(images) > 0 else None,
        )
        v_context_p, a_context_p = ctx_p.video_encoding, ctx_p.audio_encoding
        v_context_n, a_context_n = ctx_n.video_encoding, ctx_n.audio_encoding

        # ── Audio encoding (only for conditioning, not output generation) ──
        encoded_audio_latent = None
        decoded_audio = None
        if audio_path is not None:
            video_duration = num_frames / frame_rate
            decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
            if decoded_audio is None:
                raise ValueError(f"Could not extract audio stream from {audio_path}")

            encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
            audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
            expected_frames = audio_shape.frames
            actual_frames = encoded_audio_latent.shape[2]

            if actual_frames > expected_frames:
                encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
            elif actual_frames < expected_frames:
                pad = torch.zeros(
                    encoded_audio_latent.shape[0],
                    encoded_audio_latent.shape[1],
                    expected_frames - actual_frames,
                    encoded_audio_latent.shape[3],
                    device=encoded_audio_latent.device,
                    dtype=encoded_audio_latent.dtype,
                )
                encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)
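            # Shape note (hedged): the audio latent is assumed to be laid out as
            # [batch, channels=8, time_frames, mel_bins=16]; dim 2 is trimmed or
            # zero-padded so the conditioning latent matches the frame count that
            # AudioLatentShape.from_duration expects for the video's duration.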

        video_encoder = self.model_ledger.video_encoder()
        transformer = self.model_ledger.transformer()
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)

        def stage1_denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=multi_modal_guider_denoising_func(
                    video_guider=MultiModalGuider(
                        params=video_guider_params,
                        negative_context=v_context_n,
                    ),
                    audio_guider=MultiModalGuider(
                        params=audio_guider_params,
                        negative_context=a_context_n,
                    ),
                    v_context=v_context_p,
                    a_context=a_context_p,
                    transformer=transformer,
                ),
            )

        def stage2_denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=v_context_p,
                    audio_context=a_context_p,
                    transformer=transformer,
                ),
            )

        # ── Stage 1: Half resolution ──
        stage_1_output_shape = VideoPixelShape(
            batch=1,
            frames=num_frames,
            width=width // 2,
            height=height // 2,
            fps=frame_rate,
        )
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )

        # Use denoise_audio_video so audio is ALWAYS generated
        from ltx_pipelines.utils import denoise_audio_video
        video_state, audio_state = denoise_audio_video(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=stage1_denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        cleanup_memory()

        # ── Upscaling ──
        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )

        # ── Stage 2: Full resolution ──
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state, audio_state = denoise_audio_video(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=stage2_denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=audio_state.latent,
        )

        torch.cuda.synchronize()
        del transformer
        del video_encoder
        cleanup_memory()

        # ── Decode both video and audio ──
        decoded_video = vae_decode_video(
            video_state.latent,
            self.model_ledger.video_decoder(),
            tiling_config,
            generator,
        )
        decoded_audio_output = vae_decode_audio(
            audio_state.latent,
            self.model_ledger.audio_decoder(),
            self.model_ledger.vocoder(),
        )

        return decoded_video, decoded_audio_output
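
# Pipeline overview (from the code above): stage 1 denoises at half resolution with the
# distilled sigma schedule, the spatial upsampler then doubles the latent's spatial size,
# and stage 2 re-noises to STAGE_2_DISTILLED_SIGMA_VALUES[0] and refines at full
# resolution, so most denoising work happens at a quarter of the final pixel count.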

# Model repos
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"

# Download model checkpoints
print("=" * 80)
print("Downloading LTX-2.3 distilled model + Gemma...")
print("=" * 80)

# LoRA cache directory and currently-applied key
LORA_CACHE_DIR = Path("lora_cache")
LORA_CACHE_DIR.mkdir(exist_ok=True)
current_lora_key: str | None = None

PENDING_LORA_KEY: str | None = None
PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
PENDING_LORA_STATUS: str = "No LoRA state prepared yet."

weights_dir = Path("weights")
weights_dir.mkdir(exist_ok=True)
checkpoint_path = hf_hub_download(
    repo_id=LTX_MODEL_REPO,
    filename="ltx-2.3-22b-distilled-1.1.safetensors",
    local_dir=str(weights_dir),
    local_dir_use_symlinks=False,
)
spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.1.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)


# LoRA repo + download the requested LoRA adapters
LORA_REPO = "dagloop5/LoRA"

print("=" * 80)
print("Downloading LoRA adapters from dagloop5/LoRA...")
print("=" * 80)
pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_reasoning_I2V_V3.safetensors")
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # Hyperfap
dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # "[He | She] is having am orgasm." (am or an?)
fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")  # cr3ampi3 animation., missionary animation, doggystyle bouncy animation, double penetration animation
liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")  # wet dr1pp
demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
transition_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2_takerpov_lora_v1.2.safetensors")  # takerpov1, taker pov

print(f"Pose LoRA: {pose_lora_path}")
print(f"General LoRA: {general_lora_path}")
print(f"Motion LoRA: {motion_lora_path}")
print(f"Dreamlay LoRA: {dreamlay_lora_path}")
print(f"Mself LoRA: {mself_lora_path}")
print(f"Dramatic LoRA: {dramatic_lora_path}")
print(f"Fluid LoRA: {fluid_lora_path}")
print(f"Liquid LoRA: {liquid_lora_path}")
print(f"Demopose LoRA: {demopose_lora_path}")
print(f"Voice LoRA: {voice_lora_path}")
print(f"Realism LoRA: {realism_lora_path}")
print(f"Transition LoRA: {transition_lora_path}")

print(f"Checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"[Gemma] Root ready: {gemma_root}")

pipeline = LTX23DistilledA2VPipeline(
    distilled_checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    loras=(),
    quantization=QuantizationPolicy.fp8_cast(),
)

def _make_lora_key(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
    voice_strength: float,
    realism_strength: float,
    transition_strength: float,
) -> tuple[str, str]:
    rp = round(float(pose_strength), 2)
    rg = round(float(general_strength), 2)
    rm = round(float(motion_strength), 2)
    rd = round(float(dreamlay_strength), 2)
    rs = round(float(mself_strength), 2)
    rr = round(float(dramatic_strength), 2)
    rf = round(float(fluid_strength), 2)
    rl = round(float(liquid_strength), 2)
    ro = round(float(demopose_strength), 2)
    rv = round(float(voice_strength), 2)
    re = round(float(realism_strength), 2)
    rt = round(float(transition_strength), 2)
    key_str = (
        f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|"
        f"{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|"
        f"{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}|"
        f"{voice_lora_path}:{rv}|{realism_lora_path}:{re}|{transition_lora_path}:{rt}"
    )
    key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
    return key, key_str
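
# Cache-key behavior (hedged sketch): strengths are rounded to 2 decimals before hashing,
# so e.g. pose_strength=0.501 and 0.499 both round to 0.5 and reuse the same fused
# state_dict on disk, while any strength that differs after rounding yields a new
# sha256 key and therefore a fresh lora_cache/<key>.safetensors entry.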


def prepare_lora_cache(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
    voice_strength: float,
    realism_strength: float,
    transition_strength: float,
    progress=gr.Progress(track_tqdm=True),
):
    """
    CPU-only step:
      - checks the cache
      - loads a cached fused transformer state_dict, or
      - builds the fused transformer on CPU and saves it
    The resulting state_dict is stored in memory and can be applied later.
    """
    global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS

    ledger = pipeline.model_ledger
    key, _ = _make_lora_key(
        pose_strength, general_strength, motion_strength, dreamlay_strength,
        mself_strength, dramatic_strength, fluid_strength, liquid_strength,
        demopose_strength, voice_strength, realism_strength, transition_strength,
    )
    cache_path = LORA_CACHE_DIR / f"{key}.safetensors"

    progress(0.05, desc="Preparing LoRA state")
    if cache_path.exists():
        try:
            progress(0.20, desc="Loading cached fused state")
            state = load_file(str(cache_path))
            PENDING_LORA_KEY = key
            PENDING_LORA_STATE = state
            PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
            return PENDING_LORA_STATUS
        except Exception as e:
            print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")

    entries = [
        (pose_lora_path, round(float(pose_strength), 2)),
        (general_lora_path, round(float(general_strength), 2)),
        (motion_lora_path, round(float(motion_strength), 2)),
        (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
        (mself_lora_path, round(float(mself_strength), 2)),
        (dramatic_lora_path, round(float(dramatic_strength), 2)),
        (fluid_lora_path, round(float(fluid_strength), 2)),
        (liquid_lora_path, round(float(liquid_strength), 2)),
        (demopose_lora_path, round(float(demopose_strength), 2)),
        (voice_lora_path, round(float(voice_strength), 2)),
        (realism_lora_path, round(float(realism_strength), 2)),
        (transition_lora_path, round(float(transition_strength), 2)),
    ]
    loras_for_builder = [
        LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
        for path, strength in entries
        if path is not None and float(strength) != 0.0
    ]

    if not loras_for_builder:
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
        return PENDING_LORA_STATUS

    tmp_ledger = None
    new_transformer_cpu = None
    try:
        progress(0.35, desc="Building fused CPU transformer")
        tmp_ledger = pipeline.model_ledger.__class__(
            dtype=ledger.dtype,
            device=torch.device("cpu"),
            checkpoint_path=str(checkpoint_path),
            spatial_upsampler_path=str(spatial_upsampler_path),
            gemma_root_path=str(gemma_root),
            loras=tuple(loras_for_builder),
            quantization=getattr(ledger, "quantization", None),
        )
        new_transformer_cpu = tmp_ledger.transformer()

        progress(0.70, desc="Extracting fused state_dict")
        state = {
            k: v.detach().cpu().contiguous()
            for k, v in new_transformer_cpu.state_dict().items()
        }
        save_file(state, str(cache_path))

        PENDING_LORA_KEY = key
        PENDING_LORA_STATE = state
        PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
        return PENDING_LORA_STATUS

    except Exception as e:
        import traceback
        print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
        print(traceback.format_exc())
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
        return PENDING_LORA_STATUS

    finally:
        try:
            del new_transformer_cpu
        except Exception:
            pass
        try:
            del tmp_ledger
        except Exception:
            pass
        gc.collect()


def apply_prepared_lora_state_to_pipeline():
    """
    Fast step: copy the already prepared CPU state into the live transformer.
    This is the only part that should remain near generation time.
    """
    global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE

    if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
        print("[LoRA] No prepared LoRA state available; skipping.")
        return False

    if current_lora_key == PENDING_LORA_KEY:
        print("[LoRA] Prepared LoRA state already active; skipping.")
        return True

    # _transformer is created by the preload block below; this function only runs
    # at generation time, well after module import has finished.
    existing_transformer = _transformer
    with torch.no_grad():
        missing, unexpected = existing_transformer.load_state_dict(PENDING_LORA_STATE, strict=False)
        if missing or unexpected:
            print(f"[LoRA] load_state_dict mismatch: missing={len(missing)}, unexpected={len(unexpected)}")

    current_lora_key = PENDING_LORA_KEY
    print("[LoRA] Prepared LoRA state applied to the pipeline.")
    return True
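
# Two-phase LoRA flow (as implemented above): prepare_lora_cache does the slow, CPU-only
# fusion and caching while no GPU is attached, and this apply step only copies tensors
# into the resident transformer via load_state_dict(strict=False), which is cheap enough
# to run inside the ZeroGPU window right before generation.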

# Preload all models for ZeroGPU tensor packing.
print("Preloading all models (including Gemma and audio components)...")
ledger = pipeline.model_ledger

# Save the original factory methods so we can rebuild individual components later.
# These are bound callables on the ledger that invoke the builder when called.
_orig_transformer_factory = ledger.transformer
_orig_video_encoder_factory = ledger.video_encoder
_orig_video_decoder_factory = ledger.video_decoder
_orig_audio_encoder_factory = ledger.audio_encoder
_orig_audio_decoder_factory = ledger.audio_decoder
_orig_vocoder_factory = ledger.vocoder
_orig_spatial_upsampler_factory = ledger.spatial_upsampler
_orig_text_encoder_factory = ledger.text_encoder
_orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor

# Call the original factories once to create the cached instances served by default.
_transformer = _orig_transformer_factory()
_video_encoder = _orig_video_encoder_factory()
_video_decoder = _orig_video_decoder_factory()
_audio_encoder = _orig_audio_encoder_factory()
_audio_decoder = _orig_audio_decoder_factory()
_vocoder = _orig_vocoder_factory()
_spatial_upsampler = _orig_spatial_upsampler_factory()
_text_encoder = _orig_text_encoder_factory()
_embeddings_processor = _orig_gemma_embeddings_factory()

# Replace ledger methods with lightweight lambdas that return the cached instances.
# The original factories saved above remain available for rebuilding components.
ledger.transformer = lambda: _transformer
ledger.video_encoder = lambda: _video_encoder
ledger.video_decoder = lambda: _video_decoder
ledger.audio_encoder = lambda: _audio_encoder
ledger.audio_decoder = lambda: _audio_decoder
ledger.vocoder = lambda: _vocoder
ledger.spatial_upsampler = lambda: _spatial_upsampler
ledger.text_encoder = lambda: _text_encoder
ledger.gemma_embeddings_processor = lambda: _embeddings_processor

print("All models preloaded (including Gemma text encoder and audio encoder)!")
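
# Why the lambda swap (hedged): each ledger factory would otherwise rebuild its model on
# every call; memoizing the built instances and returning them from lambdas keeps every
# component resident after the one-time preload, so repeated ZeroGPU invocations skip
# reloading weights from disk.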

print("=" * 80)
print("Pipeline ready!")
print("=" * 80)

def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))
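
# Worked example: a 1920x1080 PIL upload has ratio 1920/1080 ≈ 1.78, closest to 16/9,
# so detect_aspect_ratio returns "16:9" and the high-res tier maps it to 1536x1024.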


def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


# Identical logic to on_image_upload; kept as an alias so both events share one handler.
on_highres_toggle = on_image_upload


def get_gpu_duration(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    negative_prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    video_cfg_scale: float = 1.0,
    video_stg_scale: float = 0.0,
    video_rescale_scale: float = 0.45,
    video_a2v_scale: float = 3.0,
    audio_cfg_scale: float = 1.0,
    audio_stg_scale: float = 0.0,
    audio_rescale_scale: float = 1.0,
    audio_v2a_scale: float = 3.0,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    progress=None,
):
    # The UI slider directly controls the requested ZeroGPU window length.
    return int(gpu_duration)

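# Hedged note: on ZeroGPU, spaces.GPU accepts a callable for `duration`; it is invoked
# with the same arguments as the decorated function, so the slider value sizes the GPU
# allocation per request.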
@spaces.GPU(duration=get_gpu_duration)
@torch.inference_mode()
def generate_video(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    negative_prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    video_cfg_scale: float = 1.0,
    video_stg_scale: float = 0.0,
    video_rescale_scale: float = 0.45,
    video_a2v_scale: float = 3.0,
    audio_cfg_scale: float = 1.0,
    audio_stg_scale: float = 0.0,
    audio_rescale_scale: float = 1.0,
    audio_v2a_scale: float = 3.0,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    progress=gr.Progress(track_tqdm=True),
):
    # Resolve the seed before the try block so the except path can always report it.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")

        frame_rate = DEFAULT_FRAME_RATE
        num_frames = int(duration * frame_rate) + 1
        # Round the frame count up to the next 8k + 1 required by the video latent grid.
        num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
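
        # Worked example: duration=2.5 s at 24 fps gives int(2.5 * 24) + 1 = 61 frames,
        # and ((61 - 1 + 7) // 8) * 8 + 1 = 65, the next valid 8k + 1 count; a 10 s
        # request gives 241, which is already of that form and is left unchanged.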

        print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")

        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                temp_first_path = Path(first_image)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))

        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                temp_last_path = Path(last_image)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))

        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)

        video_guider_params = MultiModalGuiderParams(
            cfg_scale=video_cfg_scale,
            stg_scale=video_stg_scale,
            rescale_scale=video_rescale_scale,
            modality_scale=video_a2v_scale,
            skip_step=0,
            stg_blocks=[],
        )

        audio_guider_params = MultiModalGuiderParams(
            cfg_scale=audio_cfg_scale,
            stg_scale=audio_stg_scale,
            rescale_scale=audio_rescale_scale,
            modality_scale=audio_v2a_scale,
            skip_step=0,
            stg_blocks=[],
        )

        log_memory("before pipeline call")

        apply_prepared_lora_state_to_pipeline()

        video, audio = pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt,
            seed=current_seed,
            height=int(height),
            width=int(width),
            num_frames=num_frames,
            frame_rate=frame_rate,
            video_guider_params=video_guider_params,
            audio_guider_params=audio_guider_params,
            images=images,
            audio_path=input_audio,
            tiling_config=tiling_config,
            enhance_prompt=enhance_prompt,
        )

        log_memory("after pipeline call")

        output_path = tempfile.mktemp(suffix=".mp4")
        encode_video(
            video=video,
            fps=frame_rate,
            audio=audio,
            output_path=output_path,
            video_chunks_number=video_chunks_number,
        )

        log_memory("after encode_video")
        return str(output_path), current_seed

    except Exception as e:
        import traceback
        log_memory("on error")
        print(f"Error: {str(e)}\n{traceback.format_exc()}")
        return None, current_seed


# =============================================================================
# Gradio UI
# =============================================================================

css = """
.fillable {max-width: 1200px !important}
.progress-text {color: black}
"""

# theme and css belong to gr.Blocks(), not launch(), so they are set here.
with gr.Blocks(
    title="LTX-2.3 Distilled with LoRAs, Negative Prompting, and Advanced Settings",
    theme=gr.themes.Citrus(),
    css=css,
) as demo:
    gr.Markdown("# LTX-2.3 Two-Stage HQ Video Generation")
    gr.Markdown(
        "High-quality text/image-to-video with cached LoRA state + CFG guidance. "
        "[[Model]](https://huggingface.co/Lightricks/LTX-2.3)"
    )

    with gr.Row():
        # LEFT SIDE: Input Controls
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")

            prompt = gr.Textbox(
                label="Prompt",
                value="Make this image come alive with cinematic motion, smooth animation",
                lines=3,
                placeholder="Describe the motion and animation you want...",
            )

            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value=DEFAULT_NEGATIVE_PROMPT,
                lines=2,
                placeholder="List what you specifically don't want...",
            )

            duration = gr.Slider(
                label="Duration (seconds)",
                minimum=1.0, maximum=30.0, value=10.0, step=0.1,
            )

            with gr.Row():
                seed = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=MAX_SEED)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)

            with gr.Row():
                high_res = gr.Checkbox(label="High Resolution", value=True)
                enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)

            with gr.Row():
                width = gr.Number(label="Width", value=1536, precision=0)
                height = gr.Number(label="Height", value=1024, precision=0)

            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")

            with gr.Accordion("Advanced Settings", open=False):
                gr.Markdown("### Video Guidance Parameters")

                with gr.Row():
                    video_cfg_scale = gr.Slider(
                        label="Video CFG Scale", minimum=1.0, maximum=10.0, value=1.0, step=0.1
                    )
                    video_stg_scale = gr.Slider(
                        label="Video STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
                    )

                with gr.Row():
                    video_rescale_scale = gr.Slider(
                        label="Video Rescale", minimum=0.0, maximum=2.0, value=0.45, step=0.1
                    )
                    video_a2v_scale = gr.Slider(
                        label="A2V Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
                    )

                gr.Markdown("### Audio Guidance Parameters")

                with gr.Row():
                    audio_cfg_scale = gr.Slider(
                        label="Audio CFG Scale", minimum=1.0, maximum=15.0, value=1.0, step=0.1
                    )
                    audio_stg_scale = gr.Slider(
                        label="Audio STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
                    )

                with gr.Row():
                    audio_rescale_scale = gr.Slider(
                        label="Audio Rescale", minimum=0.0, maximum=2.0, value=1.0, step=0.1
                    )
                    audio_v2a_scale = gr.Slider(
                        label="V2A Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
                    )
                with gr.Row():
                    input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")

        # RIGHT SIDE: Output and LoRA
        with gr.Column():
            output_video = gr.Video(label="Generated Video", autoplay=False)

            gpu_duration = gr.Slider(
                label="ZeroGPU duration (seconds)",
                minimum=30.0, maximum=240.0, value=90.0, step=1.0,
                info="Increase for longer videos, higher resolution, or LoRA usage",
            )

            gr.Markdown("### LoRA Adapter Strengths")
            gr.Markdown("Set a strength to 0 to disable that adapter, then click 'Prepare / Load LoRA Cache'.")

            with gr.Row():
                pose_strength = gr.Slider(label="Anthro Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                gr.Markdown("")  # Spacer for alignment

            with gr.Row():
                general_strength = gr.Slider(label="Reasoning Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                motion_strength = gr.Slider(label="Anthro Posing", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                dreamlay_strength = gr.Slider(label="Dreamlay", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                mself_strength = gr.Slider(label="Mself", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                dramatic_strength = gr.Slider(label="Dramatic", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                fluid_strength = gr.Slider(label="Fluid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                liquid_strength = gr.Slider(label="Liquid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                demopose_strength = gr.Slider(label="Audio Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                voice_strength = gr.Slider(label="Voice Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                realism_strength = gr.Slider(label="Anthro Realism", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                transition_strength = gr.Slider(label="POV", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                gr.Markdown("")  # Spacer for alignment

            prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
            lora_status = gr.Textbox(
                label="LoRA Cache Status",
                value="No LoRA state prepared yet.",
                interactive=False,
            )

    # Event handlers
    first_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
    last_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
    high_res.change(fn=on_highres_toggle, inputs=[first_image, last_image, high_res], outputs=[width, height])

    prepare_lora_btn.click(
        fn=prepare_lora_cache,
        inputs=[pose_strength, general_strength, motion_strength, dreamlay_strength,
                mself_strength, dramatic_strength, fluid_strength, liquid_strength,
                demopose_strength, voice_strength, realism_strength, transition_strength],
        outputs=[lora_status],
    )

    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, input_audio, prompt, negative_prompt, duration, gpu_duration,
            enhance_prompt, seed, randomize_seed, height, width,
            video_cfg_scale, video_stg_scale, video_rescale_scale, video_a2v_scale,
            audio_cfg_scale, audio_stg_scale, audio_rescale_scale, audio_v2a_scale,
            pose_strength, general_strength, motion_strength,
            dreamlay_strength, mself_strength, dramatic_strength, fluid_strength,
            liquid_strength, demopose_strength, voice_strength, realism_strength,
            transition_strength,
        ],
        outputs=[output_video, seed],
    )


if __name__ == "__main__":
    demo.queue().launch(mcp_server=False)