dagloop5 committed
Commit fd36099 · verified · 1 Parent(s): 4ff9f31

Upload app(3).py

Files changed (1)
  1. app(3).py +983 -0
app(3).py ADDED
@@ -0,0 +1,983 @@
+ import os
+ import subprocess
+ import sys
+
+ # Disable torch.compile / dynamo before any torch import
+ os.environ["TORCH_COMPILE_DISABLE"] = "1"
+ os.environ["TORCHDYNAMO_DISABLE"] = "1"
+
+ # Install xformers for memory-efficient attention
+ subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
+
+ # Clone LTX-2 repo and install packages
+ LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
+ LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")
+
+ LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video
+
+ if not os.path.exists(LTX_REPO_DIR):
+     print(f"Cloning {LTX_REPO_URL}...")
+     subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
+     subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)
+
+ print("Installing ltx-core and ltx-pipelines from cloned repo...")
+ subprocess.run(
+     [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
+      os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
+      "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
+     check=True,
+ )
+
+ sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
+ sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
+
+ import logging
+ import random
+ import tempfile
+ from pathlib import Path
+ import gc
+ import hashlib
+
+ import torch
+ torch._dynamo.config.suppress_errors = True
+ torch._dynamo.config.disable = True
+
+ import spaces
+ import gradio as gr
+ import numpy as np
+ from huggingface_hub import hf_hub_download, snapshot_download
+ from safetensors.torch import load_file, save_file
+ from safetensors import safe_open
+ import json
+ import requests
+
+ from ltx_core.components.diffusion_steps import EulerDiffusionStep
+ from ltx_core.components.guiders import MultiModalGuider, MultiModalGuiderParams
+ from ltx_core.components.noisers import GaussianNoiser
+ from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
+ from ltx_core.model.audio_vae import decode_audio as vae_decode_audio
+ from ltx_core.model.upsampler import upsample_video
+ from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
+ from ltx_core.quantization import QuantizationPolicy
+ from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
+ from ltx_pipelines.distilled import DistilledPipeline
+ from ltx_pipelines.utils import euler_denoising_loop
+ from ltx_pipelines.utils.args import ImageConditioningInput
+ from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
+ from ltx_pipelines.utils.helpers import (
+     cleanup_memory,
+     combined_image_conditionings,
+     denoise_video_only,
+     encode_prompts,
+     simple_denoising_func,
+     multi_modal_guider_denoising_func,
+ )
+ from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
+ from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
+ from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP
+
+ # Force-patch xformers attention into the LTX attention module.
+ from ltx_core.model.transformer import attention as _attn_mod
+ print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
+ try:
+     from xformers.ops import memory_efficient_attention as _mea
+     _attn_mod.memory_efficient_attention = _mea
+     print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
+ except Exception as e:
+     print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")
+
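+ # Rebinding the module-level name is enough here: the attention code resolves
+ # `memory_efficient_attention` as a module global at call time, so a failed
+ # patch simply leaves the default (slower, more VRAM-hungry) path in place.
+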
+ logging.getLogger().setLevel(logging.INFO)
+
+ MAX_SEED = np.iinfo(np.int32).max
+ DEFAULT_PROMPT = (
+     "An astronaut hatches from a fragile egg on the surface of the Moon, "
+     "the shell cracking and peeling apart in gentle low-gravity motion. "
+     "Fine lunar dust lifts and drifts outward with each movement, floating "
+     "in slow arcs before settling back onto the ground."
+ )
+ DEFAULT_NEGATIVE_PROMPT = (
+     "worst quality, inconsistent motion, blurry, jittery, distorted, "
+     "deformed, artifacts, text, watermark, logo, frame, border, "
+     "low resolution, pixelated, unnatural, fake, CGI, cartoon"
+ )
+ DEFAULT_FRAME_RATE = 24.0
+
+ # Resolution presets: (width, height)
+ RESOLUTIONS = {
+     "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
+     "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
+ }
+
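+ # Example: a landscape upload on the "high" tier resolves to
+ # RESOLUTIONS["high"]["16:9"] == (1536, 1024), i.e. width=1536, height=1024.
+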
+ class LTX23DistilledA2VPipeline:
+     """Standalone pipeline with optional audio conditioning; no parent class."""
+
+     def __init__(
+         self,
+         distilled_checkpoint_path: str,
+         spatial_upsampler_path: str,
+         gemma_root: str,
+         loras: tuple,
+         quantization: QuantizationPolicy | None = None,
+     ):
+         from ltx_pipelines.utils import ModelLedger, denoise_audio_video
+         from ltx_pipelines.utils.types import PipelineComponents
+
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.dtype = torch.bfloat16
+
+         self.model_ledger = ModelLedger(
+             dtype=self.dtype,
+             device=self.device,
+             checkpoint_path=distilled_checkpoint_path,
+             gemma_root_path=gemma_root,
+             spatial_upsampler_path=spatial_upsampler_path,
+             loras=loras,
+             quantization=quantization,
+         )
+
+         self.pipeline_components = PipelineComponents(
+             dtype=self.dtype,
+             device=self.device,
+         )
+
+     def __call__(
+         self,
+         prompt: str,
+         negative_prompt: str,
+         seed: int,
+         height: int,
+         width: int,
+         num_frames: int,
+         frame_rate: float,
+         video_guider_params: MultiModalGuiderParams,
+         audio_guider_params: MultiModalGuiderParams,
+         images: list[ImageConditioningInput],
+         audio_path: str | None = None,
+         tiling_config: TilingConfig | None = None,
+         enhance_prompt: bool = False,
+     ):
+         print(prompt)
+
+         generator = torch.Generator(device=self.device).manual_seed(seed)
+         noiser = GaussianNoiser(generator=generator)
+         stepper = EulerDiffusionStep()
+         dtype = torch.bfloat16
+
+         ctx_p, ctx_n = encode_prompts(
+             [prompt, negative_prompt],
+             self.model_ledger,
+             enhance_first_prompt=enhance_prompt,
+             enhance_prompt_image=images[0].path if len(images) > 0 else None,
+         )
+         v_context_p, a_context_p = ctx_p.video_encoding, ctx_p.audio_encoding
+         v_context_n, a_context_n = ctx_n.video_encoding, ctx_n.audio_encoding
+
+         # ── Audio encoding (only for conditioning, not output generation) ──
+         encoded_audio_latent = None
+         decoded_audio = None
+         if audio_path is not None:
+             video_duration = num_frames / frame_rate
+             decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
+             if decoded_audio is None:
+                 raise ValueError(f"Could not extract audio stream from {audio_path}")
+
+             encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
+             audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
+             expected_frames = audio_shape.frames
+             actual_frames = encoded_audio_latent.shape[2]
+
+             if actual_frames > expected_frames:
+                 encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
+             elif actual_frames < expected_frames:
+                 pad = torch.zeros(
+                     encoded_audio_latent.shape[0],
+                     encoded_audio_latent.shape[1],
+                     expected_frames - actual_frames,
+                     encoded_audio_latent.shape[3],
+                     device=encoded_audio_latent.device,
+                     dtype=encoded_audio_latent.dtype,
+                 )
+                 encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)
+
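+         # The conditioning latent must match the length the denoiser expects,
+         # so longer audio was trimmed and shorter audio zero-padded along the
+         # latent time axis (dim 2) above.
+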
+         video_encoder = self.model_ledger.video_encoder()
+         transformer = self.model_ledger.transformer()
+         stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)
+
+         def stage1_denoising_loop(sigmas, video_state, audio_state, stepper):
+             return euler_denoising_loop(
+                 sigmas=sigmas,
+                 video_state=video_state,
+                 audio_state=audio_state,
+                 stepper=stepper,
+                 denoise_fn=multi_modal_guider_denoising_func(
+                     video_guider=MultiModalGuider(
+                         params=video_guider_params,
+                         negative_context=v_context_n,
+                     ),
+                     audio_guider=MultiModalGuider(
+                         params=audio_guider_params,
+                         negative_context=a_context_n,
+                     ),
+                     v_context=v_context_p,
+                     a_context=a_context_p,
+                     transformer=transformer,
+                 ),
+             )
+
+         def stage2_denoising_loop(sigmas, video_state, audio_state, stepper):
+             return euler_denoising_loop(
+                 sigmas=sigmas,
+                 video_state=video_state,
+                 audio_state=audio_state,
+                 stepper=stepper,
+                 denoise_fn=simple_denoising_func(
+                     video_context=v_context_p,
+                     audio_context=a_context_p,
+                     transformer=transformer,
+                 ),
+             )
+
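+         # Stage 1 runs the full guider stack (CFG/STG/cross-modal scales) while
+         # stage 2 refines with a plain denoiser, so the guidance sliders mostly
+         # shape the low-resolution pass.
+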
+         # ── Stage 1: Half resolution ──
+         stage_1_output_shape = VideoPixelShape(
+             batch=1,
+             frames=num_frames,
+             width=width // 2,
+             height=height // 2,
+             fps=frame_rate,
+         )
+         stage_1_conditionings = combined_image_conditionings(
+             images=images,
+             height=stage_1_output_shape.height,
+             width=stage_1_output_shape.width,
+             video_encoder=video_encoder,
+             dtype=dtype,
+             device=self.device,
+         )
+
+         # Use denoise_audio_video so audio is ALWAYS generated
+         from ltx_pipelines.utils import denoise_audio_video
+         video_state, audio_state = denoise_audio_video(
+             output_shape=stage_1_output_shape,
+             conditionings=stage_1_conditionings,
+             noiser=noiser,
+             sigmas=stage_1_sigmas,
+             stepper=stepper,
+             denoising_loop_fn=stage1_denoising_loop,
+             components=self.pipeline_components,
+             dtype=dtype,
+             device=self.device,
+             initial_audio_latent=encoded_audio_latent,
+         )
+
+         torch.cuda.synchronize()
+         cleanup_memory()
+
+         # ── Upscaling ──
+         upscaled_video_latent = upsample_video(
+             latent=video_state.latent[:1],
+             video_encoder=video_encoder,
+             upsampler=self.model_ledger.spatial_upsampler(),
+         )
+
+         # ── Stage 2: Full resolution ──
+         stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
+         stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
+         stage_2_conditionings = combined_image_conditionings(
+             images=images,
+             height=stage_2_output_shape.height,
+             width=stage_2_output_shape.width,
+             video_encoder=video_encoder,
+             dtype=dtype,
+             device=self.device,
+         )
+         video_state, audio_state = denoise_audio_video(
+             output_shape=stage_2_output_shape,
+             conditionings=stage_2_conditionings,
+             noiser=noiser,
+             sigmas=stage_2_sigmas,
+             stepper=stepper,
+             denoising_loop_fn=stage2_denoising_loop,
+             components=self.pipeline_components,
+             dtype=dtype,
+             device=self.device,
+             noise_scale=stage_2_sigmas[0],
+             initial_video_latent=upscaled_video_latent,
+             initial_audio_latent=audio_state.latent,
+         )
+
+         torch.cuda.synchronize()
+         del transformer
+         del video_encoder
+         cleanup_memory()
+
+         # ── Decode both video and audio ──
+         decoded_video = vae_decode_video(
+             video_state.latent,
+             self.model_ledger.video_decoder(),
+             tiling_config,
+             generator,
+         )
+         decoded_audio_output = vae_decode_audio(
+             audio_state.latent,
+             self.model_ledger.audio_decoder(),
+             self.model_ledger.vocoder(),
+         )
+
+         return decoded_video, decoded_audio_output
+
+ # Model repos
+ LTX_MODEL_REPO = "Lightricks/LTX-2.3"
+ GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"
+
+ # Download model checkpoints
+ print("=" * 80)
+ print("Downloading LTX-2.3 distilled model + Gemma...")
+ print("=" * 80)
+
+ # LoRA cache directory and currently-applied key
+ LORA_CACHE_DIR = Path("lora_cache")
+ LORA_CACHE_DIR.mkdir(exist_ok=True)
+ current_lora_key: str | None = None
+
+ PENDING_LORA_KEY: str | None = None
+ PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
+ PENDING_LORA_STATUS: str = "No LoRA state prepared yet."
+
+ weights_dir = Path("weights")
+ weights_dir.mkdir(exist_ok=True)
+ checkpoint_path = hf_hub_download(
+     repo_id=LTX_MODEL_REPO,
+     filename="ltx-2.3-22b-distilled-1.1.safetensors",
+     local_dir=str(weights_dir),
+     local_dir_use_symlinks=False,
+ )
+ spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.1.safetensors")
+ gemma_root = snapshot_download(repo_id=GEMMA_REPO)
+
+
+ # LoRA repo + download the requested LoRA adapters
+ LORA_REPO = "dagloop5/LoRA"
+
+ print("=" * 80)
+ print("Downloading LoRA adapters from dagloop5/LoRA...")
+ print("=" * 80)
+ pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
+ general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_reasoning_I2V_V3.safetensors")
+ motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
+ dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
+ mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # Hyperfap
+ dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # "[He | She] is having am orgasm." (am or an?)
+ fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")  # cr3ampi3 animation., missionary animation, doggystyle bouncy animation, double penetration animation
+ liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")  # wet dr1pp
+ demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
+ voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
+ realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
+ transition_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2_takerpov_lora_v1.2.safetensors")  # takerpov1, taker pov
+
+ print(f"Pose LoRA: {pose_lora_path}")
+ print(f"General LoRA: {general_lora_path}")
+ print(f"Motion LoRA: {motion_lora_path}")
+ print(f"Dreamlay LoRA: {dreamlay_lora_path}")
+ print(f"Mself LoRA: {mself_lora_path}")
+ print(f"Dramatic LoRA: {dramatic_lora_path}")
+ print(f"Fluid LoRA: {fluid_lora_path}")
+ print(f"Liquid LoRA: {liquid_lora_path}")
+ print(f"Demopose LoRA: {demopose_lora_path}")
+ print(f"Voice LoRA: {voice_lora_path}")
+ print(f"Realism LoRA: {realism_lora_path}")
+ print(f"Transition LoRA: {transition_lora_path}")
+
+ print(f"Checkpoint: {checkpoint_path}")
+ print(f"Spatial upsampler: {spatial_upsampler_path}")
+ print(f"[Gemma] Root ready: {gemma_root}")
+
+ pipeline = LTX23DistilledA2VPipeline(
+     distilled_checkpoint_path=checkpoint_path,
+     spatial_upsampler_path=spatial_upsampler_path,
+     gemma_root=gemma_root,
+     loras=[],
+     quantization=QuantizationPolicy.fp8_cast(),
+ )
+
+ def _make_lora_key(
+     pose_strength: float,
+     general_strength: float,
+     motion_strength: float,
+     dreamlay_strength: float,
+     mself_strength: float,
+     dramatic_strength: float,
+     fluid_strength: float,
+     liquid_strength: float,
+     demopose_strength: float,
+     voice_strength: float,
+     realism_strength: float,
+     transition_strength: float,
+ ) -> tuple[str, str]:
+     rp = round(float(pose_strength), 2)
+     rg = round(float(general_strength), 2)
+     rm = round(float(motion_strength), 2)
+     rd = round(float(dreamlay_strength), 2)
+     rs = round(float(mself_strength), 2)
+     rr = round(float(dramatic_strength), 2)
+     rf = round(float(fluid_strength), 2)
+     rl = round(float(liquid_strength), 2)
+     ro = round(float(demopose_strength), 2)
+     rv = round(float(voice_strength), 2)
+     re = round(float(realism_strength), 2)
+     rt = round(float(transition_strength), 2)
+     key_str = f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}|{voice_lora_path}:{rv}|{realism_lora_path}:{re}|{transition_lora_path}:{rt}"
+     key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
+     return key, key_str
+
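+ # Strengths are rounded to two decimals before hashing, so slider values that
+ # differ only past the second decimal reuse the same cached fused checkpoint.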
+
+ def prepare_lora_cache(
+     pose_strength: float,
+     general_strength: float,
+     motion_strength: float,
+     dreamlay_strength: float,
+     mself_strength: float,
+     dramatic_strength: float,
+     fluid_strength: float,
+     liquid_strength: float,
+     demopose_strength: float,
+     voice_strength: float,
+     realism_strength: float,
+     transition_strength: float,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     """
+     CPU-only step:
+     - checks cache
+     - loads cached fused transformer state_dict, or
+     - builds fused transformer on CPU and saves it
+     The resulting state_dict is stored in memory and can be applied later.
+     """
+     global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS
+
+     ledger = pipeline.model_ledger
+     key, _ = _make_lora_key(pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength)
+     cache_path = LORA_CACHE_DIR / f"{key}.safetensors"
+
+     progress(0.05, desc="Preparing LoRA state")
+     if cache_path.exists():
+         try:
+             progress(0.20, desc="Loading cached fused state")
+             state = load_file(str(cache_path))
+             PENDING_LORA_KEY = key
+             PENDING_LORA_STATE = state
+             PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
+             return PENDING_LORA_STATUS
+         except Exception as e:
+             print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")
+
+     entries = [
+         (pose_lora_path, round(float(pose_strength), 2)),
+         (general_lora_path, round(float(general_strength), 2)),
+         (motion_lora_path, round(float(motion_strength), 2)),
+         (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
+         (mself_lora_path, round(float(mself_strength), 2)),
+         (dramatic_lora_path, round(float(dramatic_strength), 2)),
+         (fluid_lora_path, round(float(fluid_strength), 2)),
+         (liquid_lora_path, round(float(liquid_strength), 2)),
+         (demopose_lora_path, round(float(demopose_strength), 2)),
+         (voice_lora_path, round(float(voice_strength), 2)),
+         (realism_lora_path, round(float(realism_strength), 2)),
+         (transition_lora_path, round(float(transition_strength), 2)),
+     ]
+     loras_for_builder = [
+         LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
+         for path, strength in entries
+         if path is not None and float(strength) != 0.0
+     ]
+
+     if not loras_for_builder:
+         PENDING_LORA_KEY = None
+         PENDING_LORA_STATE = None
+         PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
+         return PENDING_LORA_STATUS
+
+     tmp_ledger = None
+     new_transformer_cpu = None
+     try:
+         progress(0.35, desc="Building fused CPU transformer")
+         tmp_ledger = pipeline.model_ledger.__class__(
+             dtype=ledger.dtype,
+             device=torch.device("cpu"),
+             checkpoint_path=str(checkpoint_path),
+             spatial_upsampler_path=str(spatial_upsampler_path),
+             gemma_root_path=str(gemma_root),
+             loras=tuple(loras_for_builder),
+             quantization=getattr(ledger, "quantization", None),
+         )
+         new_transformer_cpu = tmp_ledger.transformer()
+
+         progress(0.70, desc="Extracting fused state_dict")
+         state = {
+             k: v.detach().cpu().contiguous()
+             for k, v in new_transformer_cpu.state_dict().items()
+         }
+         save_file(state, str(cache_path))
+
+         PENDING_LORA_KEY = key
+         PENDING_LORA_STATE = state
+         PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
+         return PENDING_LORA_STATUS
+
+     except Exception as e:
+         import traceback
+         print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
+         print(traceback.format_exc())
+         PENDING_LORA_KEY = None
+         PENDING_LORA_STATE = None
+         PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
+         return PENDING_LORA_STATUS
+
+     finally:
+         try:
+             del new_transformer_cpu
+         except Exception:
+             pass
+         try:
+             del tmp_ledger
+         except Exception:
+             pass
+         gc.collect()
+
+
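+ # Splitting preparation (CPU-bound, cache-backed) from application (a fast
+ # state_dict copy) keeps the expensive LoRA fuse outside the metered ZeroGPU
+ # window; only the apply step below runs at generation time.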
+ def apply_prepared_lora_state_to_pipeline():
+     """
+     Fast step: copy the already prepared CPU state into the live transformer.
+     This is the only part that should remain near generation time.
+     """
+     global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE
+
+     if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
+         print("[LoRA] No prepared LoRA state available; skipping.")
+         return False
+
+     if current_lora_key == PENDING_LORA_KEY:
+         print("[LoRA] Prepared LoRA state already active; skipping.")
+         return True
+
+     existing_transformer = _transformer
+     with torch.no_grad():
+         missing, unexpected = existing_transformer.load_state_dict(PENDING_LORA_STATE, strict=False)
+     if missing or unexpected:
+         print(f"[LoRA] load_state_dict mismatch: missing={len(missing)}, unexpected={len(unexpected)}")
+
+     current_lora_key = PENDING_LORA_KEY
+     print("[LoRA] Prepared LoRA state applied to the pipeline.")
+     return True
+
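+ # The copy happens in place on the live transformer, so the next generation
+ # call picks up the fused weights without rebuilding or moving the model.
+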
+ # Preload all models for ZeroGPU tensor packing.
+ print("Preloading all models (including Gemma and audio components)...")
+ ledger = pipeline.model_ledger
+
+ # Save the original factory methods so we can rebuild individual components later.
+ # These are bound callables on ledger that will call the builder when invoked.
+ _orig_transformer_factory = ledger.transformer
+ _orig_video_encoder_factory = ledger.video_encoder
+ _orig_video_decoder_factory = ledger.video_decoder
+ _orig_audio_encoder_factory = ledger.audio_encoder
+ _orig_audio_decoder_factory = ledger.audio_decoder
+ _orig_vocoder_factory = ledger.vocoder
+ _orig_spatial_upsampler_factory = ledger.spatial_upsampler
+ _orig_text_encoder_factory = ledger.text_encoder
+ _orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor
+
+ # Call the original factories once to create the cached instances we will serve by default.
+ _transformer = _orig_transformer_factory()
+ _video_encoder = _orig_video_encoder_factory()
+ _video_decoder = _orig_video_decoder_factory()
+ _audio_encoder = _orig_audio_encoder_factory()
+ _audio_decoder = _orig_audio_decoder_factory()
+ _vocoder = _orig_vocoder_factory()
+ _spatial_upsampler = _orig_spatial_upsampler_factory()
+ _text_encoder = _orig_text_encoder_factory()
+ _embeddings_processor = _orig_gemma_embeddings_factory()
+
+ # Replace ledger methods with lightweight lambdas that return the cached instances.
+ # We keep the original factories above so we can call them later to rebuild components.
+ ledger.transformer = lambda: _transformer
+ ledger.video_encoder = lambda: _video_encoder
+ ledger.video_decoder = lambda: _video_decoder
+ ledger.audio_encoder = lambda: _audio_encoder
+ ledger.audio_decoder = lambda: _audio_decoder
+ ledger.vocoder = lambda: _vocoder
+ ledger.spatial_upsampler = lambda: _spatial_upsampler
+ ledger.text_encoder = lambda: _text_encoder
+ ledger.gemma_embeddings_processor = lambda: _embeddings_processor
+
+ print("All models preloaded (including Gemma text encoder and audio encoder)!")
+
+ print("=" * 80)
+ print("Pipeline ready!")
+ print("=" * 80)
+
+ def log_memory(tag: str):
+     if torch.cuda.is_available():
+         allocated = torch.cuda.memory_allocated() / 1024**3
+         peak = torch.cuda.max_memory_allocated() / 1024**3
+         free, total = torch.cuda.mem_get_info()
+         print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
+
+
+ def detect_aspect_ratio(image) -> str:
+     if image is None:
+         return "16:9"
+     if hasattr(image, "size"):
+         w, h = image.size
+     elif hasattr(image, "shape"):
+         h, w = image.shape[:2]
+     else:
+         return "16:9"
+     ratio = w / h
+     candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
+     return min(candidates, key=lambda k: abs(ratio - candidates[k]))
+
+
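+ # Example: uploading a 1920x1080 image detects ratio ~1.78, i.e. "16:9", so on
+ # the "high" tier the handlers below fill width=1536 and height=1024; a
+ # 1080x1920 portrait (ratio 0.5625) maps to "9:16".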
+ def on_image_upload(first_image, last_image, high_res):
+     ref_image = first_image if first_image is not None else last_image
+     aspect = detect_aspect_ratio(ref_image)
+     tier = "high" if high_res else "low"
+     w, h = RESOLUTIONS[tier][aspect]
+     return gr.update(value=w), gr.update(value=h)
+
+
+ def on_highres_toggle(first_image, last_image, high_res):
+     ref_image = first_image if first_image is not None else last_image
+     aspect = detect_aspect_ratio(ref_image)
+     tier = "high" if high_res else "low"
+     w, h = RESOLUTIONS[tier][aspect]
+     return gr.update(value=w), gr.update(value=h)
+
+ def get_gpu_duration(
+     first_image,
+     last_image,
+     input_audio,
+     prompt: str,
+     negative_prompt: str,
+     duration: float,
+     gpu_duration: float,
+     enhance_prompt: bool = True,
+     seed: int = 42,
+     randomize_seed: bool = True,
+     height: int = 1024,
+     width: int = 1536,
+     video_cfg_scale: float = 1.0,
+     video_stg_scale: float = 0.0,
+     video_rescale_scale: float = 0.45,
+     video_a2v_scale: float = 3.0,
+     audio_cfg_scale: float = 1.0,
+     audio_stg_scale: float = 0.0,
+     audio_rescale_scale: float = 1.0,
+     audio_v2a_scale: float = 3.0,
+     pose_strength: float = 0.0,
+     general_strength: float = 0.0,
+     motion_strength: float = 0.0,
+     dreamlay_strength: float = 0.0,
+     mself_strength: float = 0.0,
+     dramatic_strength: float = 0.0,
+     fluid_strength: float = 0.0,
+     liquid_strength: float = 0.0,
+     demopose_strength: float = 0.0,
+     voice_strength: float = 0.0,
+     realism_strength: float = 0.0,
+     transition_strength: float = 0.0,
+     progress=None,
+ ):
+     return int(gpu_duration)
+
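+ # spaces.GPU invokes this callable with the same arguments as the decorated
+ # function, so its signature mirrors generate_video even though only
+ # gpu_duration is actually used.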
+ @spaces.GPU(duration=get_gpu_duration)
+ @torch.inference_mode()
+ def generate_video(
+     first_image,
+     last_image,
+     input_audio,
+     prompt: str,
+     negative_prompt: str,
+     duration: float,
+     gpu_duration: float,
+     enhance_prompt: bool = True,
+     seed: int = 42,
+     randomize_seed: bool = True,
+     height: int = 1024,
+     width: int = 1536,
+     video_cfg_scale: float = 1.0,
+     video_stg_scale: float = 0.0,
+     video_rescale_scale: float = 0.45,
+     video_a2v_scale: float = 3.0,
+     audio_cfg_scale: float = 1.0,
+     audio_stg_scale: float = 0.0,
+     audio_rescale_scale: float = 1.0,
+     audio_v2a_scale: float = 3.0,
+     pose_strength: float = 0.0,
+     general_strength: float = 0.0,
+     motion_strength: float = 0.0,
+     dreamlay_strength: float = 0.0,
+     mself_strength: float = 0.0,
+     dramatic_strength: float = 0.0,
+     fluid_strength: float = 0.0,
+     liquid_strength: float = 0.0,
+     demopose_strength: float = 0.0,
+     voice_strength: float = 0.0,
+     realism_strength: float = 0.0,
+     transition_strength: float = 0.0,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     # Pick the seed before entering the try block so the error path below can
+     # report it even when the failure happens early.
+     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
+     try:
+         torch.cuda.reset_peak_memory_stats()
+         log_memory("start")
+
+         frame_rate = DEFAULT_FRAME_RATE
+         num_frames = int(duration * frame_rate) + 1
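+         # Snap to the form 8k + 1 (e.g. 241 frames for 10 s at 24 fps),
+         # matching the 8x temporal compression of the LTX video VAE.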
+         num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
+
+         print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")
+
+         images = []
+         output_dir = Path("outputs")
+         output_dir.mkdir(exist_ok=True)
+
+         if first_image is not None:
+             temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
+             if hasattr(first_image, "save"):
+                 first_image.save(temp_first_path)
+             else:
+                 temp_first_path = Path(first_image)
+             images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))
+
+         if last_image is not None:
+             temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
+             if hasattr(last_image, "save"):
+                 last_image.save(temp_last_path)
+             else:
+                 temp_last_path = Path(last_image)
+             images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))
+
+         tiling_config = TilingConfig.default()
+         video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
+
+         video_guider_params = MultiModalGuiderParams(
+             cfg_scale=video_cfg_scale,
+             stg_scale=video_stg_scale,
+             rescale_scale=video_rescale_scale,
+             modality_scale=video_a2v_scale,
+             skip_step=0,
+             stg_blocks=[],
+         )
+
+         audio_guider_params = MultiModalGuiderParams(
+             cfg_scale=audio_cfg_scale,
+             stg_scale=audio_stg_scale,
+             rescale_scale=audio_rescale_scale,
+             modality_scale=audio_v2a_scale,
+             skip_step=0,
+             stg_blocks=[],
+         )
+
+         log_memory("before pipeline call")
+
+         apply_prepared_lora_state_to_pipeline()
+
+         video, audio = pipeline(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             seed=current_seed,
+             height=int(height),
+             width=int(width),
+             num_frames=num_frames,
+             frame_rate=frame_rate,
+             video_guider_params=video_guider_params,
+             audio_guider_params=audio_guider_params,
+             images=images,
+             audio_path=input_audio,
+             tiling_config=tiling_config,
+             enhance_prompt=enhance_prompt,
+         )
+
+         log_memory("after pipeline call")
+
+         # Write into a fresh temp dir: a unique path without the race of the
+         # deprecated tempfile.mktemp, and without pre-creating the file.
+         output_path = os.path.join(tempfile.mkdtemp(), f"ltx_{current_seed}.mp4")
+         encode_video(
+             video=video,
+             fps=frame_rate,
+             audio=audio,
+             output_path=output_path,
+             video_chunks_number=video_chunks_number,
+         )
+
+         log_memory("after encode_video")
+         return str(output_path), current_seed
+
+     except Exception as e:
+         import traceback
+         log_memory("on error")
+         print(f"Error: {str(e)}\n{traceback.format_exc()}")
+         return None, current_seed
+
+ # =============================================================================
+ # Gradio UI
+ # =============================================================================
+
+ css = """
+ .fillable {max-width: 1200px !important}
+ .progress-text {color: black}
+ """
+
+ # theme and css are gr.Blocks options; launch() does not accept them.
+ with gr.Blocks(
+     title="LTX-2.3 Distilled with LoRAs, Negative Prompting, and Advanced Settings",
+     theme=gr.themes.Citrus(),
+     css=css,
+ ) as demo:
+     gr.Markdown("# LTX-2.3 Two-Stage HQ Video Generation")
+     gr.Markdown(
+         "High-quality text/image-to-video with cached LoRA state + CFG guidance. "
+         "[[Model]](https://huggingface.co/Lightricks/LTX-2.3)"
+     )
+
+     with gr.Row():
+         # LEFT SIDE: Input Controls
+         with gr.Column():
+             with gr.Row():
+                 first_image = gr.Image(label="First Frame (Optional)", type="pil")
+                 last_image = gr.Image(label="Last Frame (Optional)", type="pil")
+
+             prompt = gr.Textbox(
+                 label="Prompt",
+                 value="Make this image come alive with cinematic motion, smooth animation",
+                 lines=3,
+                 placeholder="Describe the motion and animation you want...",
+             )
+
+             negative_prompt = gr.Textbox(
+                 label="Negative Prompt",
+                 value=DEFAULT_NEGATIVE_PROMPT,
+                 placeholder="List what you specifically don't want",
+                 lines=2,
+             )
+
+             duration = gr.Slider(
+                 label="Duration (seconds)",
+                 minimum=1.0, maximum=30.0, value=10.0, step=0.1,
+             )
+
+             with gr.Row():
+                 seed = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=MAX_SEED)
+                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+
+             with gr.Row():
+                 high_res = gr.Checkbox(label="High Resolution", value=True)
+                 enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
+
+             with gr.Row():
+                 width = gr.Number(label="Width", value=1536, precision=0)
+                 height = gr.Number(label="Height", value=1024, precision=0)
+
+             generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
+
+             with gr.Accordion("Advanced Settings", open=False):
+                 gr.Markdown("### Video Guidance Parameters")
+
+                 with gr.Row():
+                     video_cfg_scale = gr.Slider(
+                         label="Video CFG Scale", minimum=1.0, maximum=10.0, value=1.0, step=0.1
+                     )
+                     video_stg_scale = gr.Slider(
+                         label="Video STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
+                     )
+
+                 with gr.Row():
+                     video_rescale_scale = gr.Slider(
+                         label="Video Rescale", minimum=0.0, maximum=2.0, value=0.45, step=0.1
+                     )
+                     video_a2v_scale = gr.Slider(
+                         label="A2V Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
+                     )
+
+                 gr.Markdown("### Audio Guidance Parameters")
+
+                 with gr.Row():
+                     audio_cfg_scale = gr.Slider(
+                         label="Audio CFG Scale", minimum=1.0, maximum=15.0, value=1.0, step=0.1
+                     )
+                     audio_stg_scale = gr.Slider(
+                         label="Audio STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
+                     )
+
+                 with gr.Row():
+                     audio_rescale_scale = gr.Slider(
+                         label="Audio Rescale", minimum=0.0, maximum=2.0, value=1.0, step=0.1
+                     )
+                     audio_v2a_scale = gr.Slider(
+                         label="V2A Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
+                     )
+                 with gr.Row():
+                     input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
+
+         # RIGHT SIDE: Output and LoRA
+         with gr.Column():
+             output_video = gr.Video(label="Generated Video", autoplay=False)
+
+             gpu_duration = gr.Slider(
+                 label="ZeroGPU duration (seconds)",
+                 minimum=30.0, maximum=240.0, value=90.0, step=1.0,
+                 info="Increase for longer videos, higher resolution, or LoRA usage",
+             )
+
+             gr.Markdown("### LoRA Adapter Strengths")
+             gr.Markdown("Set to 0 to disable, then click 'Prepare LoRA Cache'")
+
+             with gr.Row():
+                 pose_strength = gr.Slider(label="Anthro Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 gr.Markdown("")  # Spacer for alignment
+
+             with gr.Row():
+                 general_strength = gr.Slider(label="Reasoning Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 motion_strength = gr.Slider(label="Anthro Posing", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 dreamlay_strength = gr.Slider(label="Dreamlay", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 mself_strength = gr.Slider(label="Mself", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 dramatic_strength = gr.Slider(label="Dramatic", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 fluid_strength = gr.Slider(label="Fluid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 liquid_strength = gr.Slider(label="Liquid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 demopose_strength = gr.Slider(label="Audio Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 voice_strength = gr.Slider(label="Voice Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 realism_strength = gr.Slider(label="Anthro Realism", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+
+             with gr.Row():
+                 transition_strength = gr.Slider(label="POV", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
+                 gr.Markdown("")  # Spacer for alignment
+
+             prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
+             lora_status = gr.Textbox(
+                 label="LoRA Cache Status",
+                 value="No LoRA state prepared yet.",
+                 interactive=False,
+             )
+
+     # Event handlers
+     first_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
+     last_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
+     high_res.change(fn=on_highres_toggle, inputs=[first_image, last_image, high_res], outputs=[width, height])
+
+     prepare_lora_btn.click(
+         fn=prepare_lora_cache,
+         inputs=[pose_strength, general_strength, motion_strength, dreamlay_strength,
+                 mself_strength, dramatic_strength, fluid_strength, liquid_strength,
+                 demopose_strength, voice_strength, realism_strength, transition_strength],
+         outputs=[lora_status],
+     )
+
+     generate_btn.click(
+         fn=generate_video,
+         inputs=[
+             first_image, last_image, input_audio, prompt, negative_prompt, duration, gpu_duration,
+             enhance_prompt, seed, randomize_seed, height, width,
+             video_cfg_scale, video_stg_scale, video_rescale_scale, video_a2v_scale,
+             audio_cfg_scale, audio_stg_scale, audio_rescale_scale, audio_v2a_scale,
+             pose_strength, general_strength, motion_strength,
+             dreamlay_strength, mself_strength, dramatic_strength, fluid_strength,
+             liquid_strength, demopose_strength, voice_strength, realism_strength,
+             transition_strength,
+         ],
+         outputs=[output_video, seed],
+     )
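+     # generate_video's second return value is the seed actually used, so a
+     # randomized run lands back in the Seed box and can be reproduced later
+     # with "Randomize Seed" unchecked.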
+
+
+ if __name__ == "__main__":
+     demo.queue().launch(mcp_server=False)